From 1620bf39617c8a80dd22d8c88693aa9bc129c0ea Mon Sep 17 00:00:00 2001 From: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:57:12 +0530 Subject: [PATCH 01/29] fix: trim space from git repo Url on create and update material (#4787) * trim space from workflow request to ci -runner * remove trailing and leading spaces from url on create and update material * use strings.TrimSpace directly --- pkg/bean/app.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 79884460dc..fc82ed6313 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -82,11 +82,12 @@ type GitMaterial struct { FilterPattern []string `json:"filterPattern"` } -// UpdateSanitisedGitRepoUrl will remove all trailing slashes from git repository url +// UpdateSanitisedGitRepoUrl will remove all trailing slashes , leading and trailing spaces from git repository url func (m *GitMaterial) UpdateSanitisedGitRepoUrl() { for strings.HasSuffix(m.Url, "/") { m.Url = strings.TrimSuffix(m.Url, "/") } + m.Url = strings.TrimSpace(m.Url) } type CiMaterial struct { From 3851f82a8c1fb0bcb81bd5661275d67e761d99c8 Mon Sep 17 00:00:00 2001 From: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Date: Mon, 18 Mar 2024 15:39:15 +0530 Subject: [PATCH 02/29] feat: active inactive user phase 3 (#4649) * listing user and group changes * default value chnage * group listing optimisation * wire_gen * order by in group * default values * discard * updated on * case insensitive * script number change * specs * review chnages * sql update * id for user * script number chnage * review comments-1 * review comments * review comments * rest handler remove methods * validation for delete * delete user handling * review comments * review comments * bulk delete user and permission group * legacy code fix from loop in query to bulk query * export csv filters support * Revert "export csv filters support" This reverts commit 4448c9e0bcf69e7f583ed75d5931869aa0fd39dc. 
* export csv filters * bulk delete support with filters * validation * method break * refactor the method * condition * open api specs * specs * refactoring filters in bulk delete rolegroup * comments * comments * script * group listing * review comments * review comments * review comments * review comments * user delete self-review * error handling * renaming helpers * commets * checks and renaming * last login order * script number change * rolegroup migration * name change * backward compatibility handling * self review name change * chart-group -manager-fix * userrolegroups * change chnage operation * specs update * groups * find by componet id * job project id * sql script chnage * script number change --- api/auth/user/UserRestHandler.go | 62 ++++++++++------ api/auth/user/util/util.go | 8 +++ api/bean/AppView.go | 2 + api/bean/UserRequest.go | 33 +++++---- .../appWorkflow/AppWorkflowRepository.go | 1 + .../AppListingRepositoryQueryBuilder.go | 2 +- pkg/app/AppListingService.go | 1 + pkg/auth/user/RoleGroupService.go | 8 ++- pkg/auth/user/UserService.go | 72 ++++++++++++++----- pkg/auth/user/repository/UserRepository.go | 2 +- .../helper/UserRepositoryQueryBuilder.go | 6 +- scripts/sql/228_user_role_timeout.down.sql | 3 + scripts/sql/228_user_role_timeout.up.sql | 5 ++ specs/user_policy.yaml | 51 ++++++++++--- 14 files changed, 187 insertions(+), 69 deletions(-) create mode 100644 api/auth/user/util/util.go create mode 100644 scripts/sql/228_user_role_timeout.down.sql create mode 100644 scripts/sql/228_user_role_timeout.up.sql diff --git a/api/auth/user/UserRestHandler.go b/api/auth/user/UserRestHandler.go index 296fc7568a..f1a32d1238 100644 --- a/api/auth/user/UserRestHandler.go +++ b/api/auth/user/UserRestHandler.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + util2 "github.com/devtron-labs/devtron/api/auth/user/util" "github.com/devtron-labs/devtron/pkg/auth/user/helper" "github.com/gorilla/schema" "net/http" @@ -110,6 +111,23 @@ func (handler UserRestHandlerImpl) CreateUser(w http.ResponseWriter, r *http.Req userInfo.UserId = userId handler.logger.Infow("request payload, CreateUser", "payload", userInfo) + // struct Validations + handler.logger.Infow("request payload, CreateUser ", "payload", userInfo) + err = handler.validator.Struct(userInfo) + if err != nil { + handler.logger.Errorw("validation err, CreateUser", "err", err, "payload", userInfo) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + // Doing this as api is not compatible with previous release of dashboard, groups has been migrated to userRoleGroups + isGroupsPresent := util2.IsGroupsPresent(userInfo.Groups) + if isGroupsPresent { + handler.logger.Errorw("validation error , createUser ", "err", err, "payload", userInfo) + err := &util.ApiError{Code: "406", HttpStatusCode: 406, UserMessage: "Not compatible with request", InternalMessage: "Not compatible with the request payload, as groups has been migrated to userRoleGroups"} + common.WriteJsonResp(w, err, nil, http.StatusNotAcceptable) + return + } + // RBAC enforcer applying token := r.Header.Get("token") isActionUserSuperAdmin := false @@ -143,8 +161,8 @@ func (handler UserRestHandlerImpl) CreateUser(w http.ResponseWriter, r *http.Req } // auth check inside groups - if len(userInfo.Groups) > 0 { - groupRoles, err := handler.roleGroupService.FetchRolesForGroups(userInfo.Groups) + if len(userInfo.UserRoleGroup) > 0 { + groupRoles, err := handler.roleGroupService.FetchRolesForUserRoleGroups(userInfo.UserRoleGroup) if err != nil 
&& err != pg.ErrNoRows { handler.logger.Errorw("service err, UpdateUser", "err", err, "payload", userInfo) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) @@ -173,14 +191,6 @@ func (handler UserRestHandlerImpl) CreateUser(w http.ResponseWriter, r *http.Req } //RBAC enforcer Ends - handler.logger.Infow("request payload, CreateUser ", "payload", userInfo) - err = handler.validator.Struct(userInfo) - if err != nil { - handler.logger.Errorw("validation err, CreateUser", "err", err, "payload", userInfo) - common.WriteJsonResp(w, err, nil, http.StatusBadRequest) - return - } - res, err := handler.userService.CreateUser(&userInfo, token, handler.CheckManagerAuth) if err != nil { handler.logger.Errorw("service err, CreateUser", "err", err, "payload", userInfo) @@ -222,6 +232,14 @@ func (handler UserRestHandlerImpl) UpdateUser(w http.ResponseWriter, r *http.Req common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return } + // Doing this as api is not compatible with previous release of dashboard,groups has been migrated to userRoleGroups + isGroupsPresent := util2.IsGroupsPresent(userInfo.Groups) + if isGroupsPresent { + handler.logger.Errorw("validation error , createUser ", "err", err, "payload", userInfo) + err := &util.ApiError{Code: "406", HttpStatusCode: 406, UserMessage: "Not compatible with request, please update to latest version", InternalMessage: "Not compatible with the request payload, as groups has been migrated to userRoleGroups"} + common.WriteJsonResp(w, err, nil, http.StatusNotAcceptable) + return + } res, rolesChanged, groupsModified, restrictedGroups, err := handler.userService.UpdateUser(&userInfo, token, handler.CheckManagerAuth) @@ -327,10 +345,10 @@ func (handler UserRestHandlerImpl) GetAllV2(w http.ResponseWriter, r *http.Reque return } var roleFilters []bean.RoleFilter - if len(user.Groups) > 0 { - groupRoleFilters, err := handler.userService.GetRoleFiltersByGroupNames(user.Groups) + if len(user.UserRoleGroup) > 0 { + groupRoleFilters, err := handler.userService.GetRoleFiltersByUserRoleGroups(user.UserRoleGroup) if err != nil { - handler.logger.Errorw("Error in getting role filters by group names", "err", err, "groupNames", user.Groups) + handler.logger.Errorw("Error in getting role filters by group names", "err", err, "UserRoleGroup", user.UserRoleGroup) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) return } @@ -397,10 +415,10 @@ func (handler UserRestHandlerImpl) GetAll(w http.ResponseWriter, r *http.Request return } var roleFilters []bean.RoleFilter - if len(user.Groups) > 0 { - groupRoleFilters, err := handler.userService.GetRoleFiltersByGroupNames(user.Groups) + if len(user.UserRoleGroup) > 0 { + groupRoleFilters, err := handler.userService.GetRoleFiltersByUserRoleGroups(user.UserRoleGroup) if err != nil { - handler.logger.Errorw("Error in getting role filters by group names", "err", err, "groupNames", user.Groups) + handler.logger.Errorw("Error in getting role filters by group names", "err", err, "UserRoleGroup", user.UserRoleGroup) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) return } @@ -784,10 +802,10 @@ func (handler UserRestHandlerImpl) FetchRoleGroupsV2(w http.ResponseWriter, r *h return } var roleFilters []bean.RoleFilter - if len(user.Groups) > 0 { - groupRoleFilters, err := handler.userService.GetRoleFiltersByGroupNames(user.Groups) + if len(user.UserRoleGroup) > 0 { + groupRoleFilters, err := handler.userService.GetRoleFiltersByUserRoleGroups(user.UserRoleGroup) if err != nil { - 
handler.logger.Errorw("Error in getting role filters by group names", "err", err, "groupNames", user.Groups) + handler.logger.Errorw("Error in getting role filters by group names", "err", err, "UserRoleGroup", user.UserRoleGroup) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) return } @@ -855,10 +873,10 @@ func (handler UserRestHandlerImpl) FetchRoleGroups(w http.ResponseWriter, r *htt return } var roleFilters []bean.RoleFilter - if len(user.Groups) > 0 { - groupRoleFilters, err := handler.userService.GetRoleFiltersByGroupNames(user.Groups) + if len(user.UserRoleGroup) > 0 { + groupRoleFilters, err := handler.userService.GetRoleFiltersByUserRoleGroups(user.UserRoleGroup) if err != nil { - handler.logger.Errorw("Error in getting role filters by group names", "err", err, "groupNames", user.Groups) + handler.logger.Errorw("Error in getting role filters by group names", "err", err, "UserRoleGroup", user.UserRoleGroup) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) return } diff --git a/api/auth/user/util/util.go b/api/auth/user/util/util.go new file mode 100644 index 0000000000..9870ee7464 --- /dev/null +++ b/api/auth/user/util/util.go @@ -0,0 +1,8 @@ +package util + +func IsGroupsPresent(groups []string) bool { + if len(groups) > 0 { + return true + } + return false +} diff --git a/api/bean/AppView.go b/api/bean/AppView.go index 85054ab6b7..ba54f8494c 100644 --- a/api/bean/AppView.go +++ b/api/bean/AppView.go @@ -72,6 +72,7 @@ type JobContainer struct { JobActualName string `json:"appName""` Description GenericNoteResponseBean `json:"description"` JobCiPipelines []JobCIPipeline `json:"ciPipelines"'` + ProjectId int `json:"projectId"` } type JobCIPipeline struct { @@ -98,6 +99,7 @@ type JobListingContainer struct { EnvironmentName string `sql:"environment_name" json:"environmentName"` LastTriggeredEnvironmentName string `sql:"last_triggered_environment_name" json:"lastTriggeredEnvironmentName"` LastTriggeredEnvironmentId int `sql:"last_triggered_environment_id" json:"lastEnvironmentId"` + ProjectId int `sql:"team_id" json:"projectId"` } type CiPipelineLastSucceededTime struct { diff --git a/api/bean/UserRequest.go b/api/bean/UserRequest.go index 5adead598b..69c39f5d7e 100644 --- a/api/bean/UserRequest.go +++ b/api/bean/UserRequest.go @@ -30,20 +30,21 @@ type UserRole struct { } type UserInfo struct { - Id int32 `json:"id" validate:"number,not-system-admin-userid"` - EmailId string `json:"email_id" validate:"required,not-system-admin-user"` // TODO : have to migrate json key to emailId and also handle backward compatibility - Roles []string `json:"roles,omitempty"` - AccessToken string `json:"access_token,omitempty"` - RoleFilters []RoleFilter `json:"roleFilters"` - Status string `json:"status,omitempty"` - Groups []string `json:"groups"` // this will be deprecated in future do not use - SuperAdmin bool `json:"superAdmin,notnull"` - LastLoginTime time.Time `json:"lastLoginTime"` - UserType string `json:"-"` - LastUsedAt time.Time `json:"-"` - LastUsedByIp string `json:"-"` - Exist bool `json:"-"` - UserId int32 `json:"-"` // created or modified user id + Id int32 `json:"id" validate:"number,not-system-admin-userid"` + EmailId string `json:"email_id" validate:"required,not-system-admin-user"` // TODO : have to migrate json key to emailId and also handle backward compatibility + Roles []string `json:"roles,omitempty"` + AccessToken string `json:"access_token,omitempty"` + RoleFilters []RoleFilter `json:"roleFilters"` + Status string `json:"status,omitempty"` + 
Groups []string `json:"groups"` // this will be deprecated in future do not use + UserRoleGroup []UserRoleGroup `json:"userRoleGroups"` // role group with metadata + SuperAdmin bool `json:"superAdmin,notnull"` + LastLoginTime time.Time `json:"lastLoginTime"` + UserType string `json:"-"` + LastUsedAt time.Time `json:"-"` + LastUsedByIp string `json:"-"` + Exist bool `json:"-"` + UserId int32 `json:"-"` // created or modified user id } type RoleGroup struct { @@ -145,3 +146,7 @@ type BulkDeleteRequest struct { ListingRequest *ListingRequest `json:"listingRequest,omitempty"` LoggedInUserId int32 `json:"-"` } + +type UserRoleGroup struct { + RoleGroup *RoleGroup `json:"roleGroup"` +} diff --git a/internal/sql/repository/appWorkflow/AppWorkflowRepository.go b/internal/sql/repository/appWorkflow/AppWorkflowRepository.go index e0233c4ba2..edefae620e 100644 --- a/internal/sql/repository/appWorkflow/AppWorkflowRepository.go +++ b/internal/sql/repository/appWorkflow/AppWorkflowRepository.go @@ -500,6 +500,7 @@ func (impl AppWorkflowRepositoryImpl) FindByComponentId(componentId int) ([]*App Where("app_workflow_mapping.component_id= ?", componentId). Where("app_workflow.active = ?", true). Where("app_workflow_mapping.active = ?", true). + Where("app_workflow_mapping.type = ?", CIPIPELINE). Select() return appWorkflowsMapping, err } diff --git a/internal/sql/repository/helper/AppListingRepositoryQueryBuilder.go b/internal/sql/repository/helper/AppListingRepositoryQueryBuilder.go index 42409bb590..655bf2b314 100644 --- a/internal/sql/repository/helper/AppListingRepositoryQueryBuilder.go +++ b/internal/sql/repository/helper/AppListingRepositoryQueryBuilder.go @@ -72,7 +72,7 @@ const ( func (impl AppListingRepositoryQueryBuilder) BuildJobListingQuery(appIDs []int, statuses []string, environmentIds []int, sortOrder string) string { query := "select ci_pipeline.name as ci_pipeline_name,ci_pipeline.id as ci_pipeline_id,app.id as job_id,app.display_name " + - "as job_name, app.app_name,app.description,cwr.started_on,cwr.status,cem.environment_id,cwr.environment_id as last_triggered_environment_id from app left join ci_pipeline on" + + "as job_name, app.app_name,app.description,app.team_id,cwr.started_on,cwr.status,cem.environment_id,cwr.environment_id as last_triggered_environment_id from app left join ci_pipeline on" + " app.id = ci_pipeline.app_id and ci_pipeline.active=true left join (select cw.ci_pipeline_id, cw.status, cw.started_on, cw.environment_id " + " from ci_workflow cw inner join (select ci_pipeline_id, MAX(started_on) max_started_on from ci_workflow group by ci_pipeline_id ) " + "cws on cw.ci_pipeline_id = cws.ci_pipeline_id " + diff --git a/pkg/app/AppListingService.go b/pkg/app/AppListingService.go index 162133327d..f4b09f2e22 100644 --- a/pkg/app/AppListingService.go +++ b/pkg/app/AppListingService.go @@ -477,6 +477,7 @@ func BuildJobListingResponse(jobContainers []*bean.JobListingContainer, JobsLast val.JobId = jobContainer.JobId val.JobName = jobContainer.JobName val.JobActualName = jobContainer.JobActualName + val.ProjectId = jobContainer.ProjectId } if len(val.JobCiPipelines) == 0 { diff --git a/pkg/auth/user/RoleGroupService.go b/pkg/auth/user/RoleGroupService.go index 56a7bbe722..ea1b259e44 100644 --- a/pkg/auth/user/RoleGroupService.go +++ b/pkg/auth/user/RoleGroupService.go @@ -47,7 +47,7 @@ type RoleGroupService interface { FetchRoleGroupsByName(name string) ([]*bean.RoleGroup, error) DeleteRoleGroup(model *bean.RoleGroup) (bool, error) BulkDeleteRoleGroups(request 
*bean.BulkDeleteRequest) (bool, error) - FetchRolesForGroups(groupNames []string) ([]*bean.RoleFilter, error) + FetchRolesForUserRoleGroups(userRoleGroups []bean.UserRoleGroup) ([]*bean.RoleFilter, error) } type RoleGroupServiceImpl struct { @@ -938,7 +938,11 @@ func (impl RoleGroupServiceImpl) deleteMappingsFromCasbin(groupCasbinNames []str return nil } -func (impl RoleGroupServiceImpl) FetchRolesForGroups(groupNames []string) ([]*bean.RoleFilter, error) { +func (impl RoleGroupServiceImpl) FetchRolesForUserRoleGroups(userRoleGroups []bean.UserRoleGroup) ([]*bean.RoleFilter, error) { + groupNames := make([]string, 0) + for _, userRoleGroup := range userRoleGroups { + groupNames = append(groupNames, userRoleGroup.RoleGroup.Name) + } if len(groupNames) == 0 { return nil, nil } diff --git a/pkg/auth/user/UserService.go b/pkg/auth/user/UserService.go index 33e8d7b6f5..7dec4ae8dd 100644 --- a/pkg/auth/user/UserService.go +++ b/pkg/auth/user/UserService.go @@ -70,7 +70,7 @@ type UserService interface { GetByIdIncludeDeleted(id int32) (*bean.UserInfo, error) UserExists(emailId string) bool UpdateTriggerPolicyForTerminalAccess() (err error) - GetRoleFiltersByGroupNames(groupNames []string) ([]bean.RoleFilter, error) + GetRoleFiltersByUserRoleGroups(userRoleGroups []bean.UserRoleGroup) ([]bean.RoleFilter, error) SaveLoginAudit(emailId, clientIp string, id int32) } @@ -306,7 +306,7 @@ func (impl *UserServiceImpl) CreateUser(userInfo *bean.UserInfo, token string, m pass = append(pass, emailId) userInfo.EmailId = emailId userInfo.Exist = dbUser.Active - userResponse = append(userResponse, &bean.UserInfo{Id: userInfo.Id, EmailId: emailId, Groups: userInfo.Groups, RoleFilters: userInfo.RoleFilters, SuperAdmin: userInfo.SuperAdmin}) + userResponse = append(userResponse, &bean.UserInfo{Id: userInfo.Id, EmailId: emailId, Groups: userInfo.Groups, RoleFilters: userInfo.RoleFilters, SuperAdmin: userInfo.SuperAdmin, UserRoleGroup: userInfo.UserRoleGroup}) } return userResponse, nil @@ -326,6 +326,7 @@ func (impl *UserServiceImpl) updateUserIfExists(userInfo *bean.UserInfo, dbUser } updateUserInfo.RoleFilters = impl.mergeRoleFilter(updateUserInfo.RoleFilters, userInfo.RoleFilters) updateUserInfo.Groups = impl.mergeGroups(updateUserInfo.Groups, userInfo.Groups) + updateUserInfo.UserRoleGroup = impl.mergeUserRoleGroup(updateUserInfo.UserRoleGroup, userInfo.UserRoleGroup) updateUserInfo.UserId = userInfo.UserId updateUserInfo.EmailId = emailId // override case sensitivity updateUserInfo, _, _, _, err = impl.UpdateUser(updateUserInfo, token, managerAuth) @@ -395,8 +396,8 @@ func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emai } // START GROUP POLICY - for _, item := range userInfo.Groups { - userGroup, err := impl.roleGroupRepository.GetRoleGroupByName(item) + for _, item := range userInfo.UserRoleGroup { + userGroup, err := impl.roleGroupRepository.GetRoleGroupByName(item.RoleGroup.Name) if err != nil { return nil, err } @@ -612,6 +613,24 @@ func (impl *UserServiceImpl) mergeGroups(oldGroups []string, newGroups []string) return groups } +// mergeUserRoleGroup : patches the existing userRoleGroups and new userRoleGroups with unique key name-status-expression, +func (impl UserServiceImpl) mergeUserRoleGroup(oldUserRoleGroups []bean.UserRoleGroup, newUserRoleGroups []bean.UserRoleGroup) []bean.UserRoleGroup { + finalUserRoleGroups := make([]bean.UserRoleGroup, 0) + keyMap := make(map[string]bool) + for _, userRoleGroup := range oldUserRoleGroups { + key := fmt.Sprintf("%s", 
userRoleGroup.RoleGroup.Name) + finalUserRoleGroups = append(finalUserRoleGroups, userRoleGroup) + keyMap[key] = true + } + for _, userRoleGroup := range newUserRoleGroups { + key := fmt.Sprintf("%s", userRoleGroup.RoleGroup.Name) + if _, ok := keyMap[key]; !ok { + finalUserRoleGroups = append(finalUserRoleGroups, userRoleGroup) + } + } + return finalUserRoleGroups +} + func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, managerAuth func(resource, token string, object string) bool) (*bean.UserInfo, bool, bool, []string, error) { //checking if request for same user is being processed isLocked := impl.getUserReqLockStateById(userInfo.Id) @@ -737,8 +756,8 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m oldGroupMap[oldItem] = oldItem } // START GROUP POLICY - for _, item := range userInfo.Groups { - userGroup, err := impl.roleGroupRepository.GetRoleGroupByName(item) + for _, item := range userInfo.UserRoleGroup { + userGroup, err := impl.roleGroupRepository.GetRoleGroupByName(item.RoleGroup.Name) if err != nil { return nil, false, false, nil, err } @@ -750,7 +769,7 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m groupsModified = true addedPolicies = append(addedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) } else { - trimmedGroup := strings.TrimPrefix(item, "group:") + trimmedGroup := strings.TrimPrefix(item.RoleGroup.Name, "group:") restrictedGroups = append(restrictedGroups, trimmedGroup) } } @@ -832,7 +851,7 @@ func (impl *UserServiceImpl) GetById(id int32) (*bean.UserInfo, error) { return nil, err } - isSuperAdmin, roleFilters, filterGroups := impl.getUserMetadata(model) + isSuperAdmin, roleFilters, filterGroups, userRoleGroups := impl.getUserMetadata(model) for index, roleFilter := range roleFilters { if roleFilter.Entity == "" { roleFilters[index].Entity = bean2.ENTITY_APPS @@ -842,17 +861,18 @@ func (impl *UserServiceImpl) GetById(id int32) (*bean.UserInfo, error) { } } response := &bean.UserInfo{ - Id: model.Id, - EmailId: model.EmailId, - RoleFilters: roleFilters, - Groups: filterGroups, - SuperAdmin: isSuperAdmin, + Id: model.Id, + EmailId: model.EmailId, + RoleFilters: roleFilters, + Groups: filterGroups, + SuperAdmin: isSuperAdmin, + UserRoleGroup: userRoleGroups, } return response, nil } -func (impl *UserServiceImpl) getUserMetadata(model *repository.UserModel) (bool, []bean.RoleFilter, []string) { +func (impl *UserServiceImpl) getUserMetadata(model *repository.UserModel) (bool, []bean.RoleFilter, []string, []bean.UserRoleGroup) { roles, err := impl.userAuthRepository.GetRolesByUserId(model.Id) if err != nil { impl.logger.Debugw("No Roles Found for user", "id", model.Id) @@ -899,6 +919,7 @@ func (impl *UserServiceImpl) getUserMetadata(model *repository.UserModel) (bool, } var filterGroups []string + var userRoleGroups []bean.UserRoleGroup for _, item := range groups { if strings.Contains(item, "group:") { filterGroups = append(filterGroups, item) @@ -912,6 +933,7 @@ func (impl *UserServiceImpl) getUserMetadata(model *repository.UserModel) (bool, } filterGroups = nil for _, item := range filterGroupsModels { + userRoleGroups = append(userRoleGroups, bean.UserRoleGroup{RoleGroup: &bean.RoleGroup{Name: item.Name, Id: item.Id, Description: item.Description}}) filterGroups = append(filterGroups, item.Name) } } else { @@ -924,7 +946,10 @@ func (impl *UserServiceImpl) getUserMetadata(model *repository.UserModel) 
(bool, if len(roleFilters) == 0 { roleFilters = make([]bean.RoleFilter, 0) } - return isSuperAdmin, roleFilters, filterGroups + if len(userRoleGroups) == 0 { + userRoleGroups = make([]bean.UserRoleGroup, 0) + } + return isSuperAdmin, roleFilters, filterGroups, userRoleGroups } // GetAll excluding API token user @@ -1009,6 +1034,7 @@ func (impl UserServiceImpl) getUserResponse(model []repository.UserModel, totalC RoleFilters: make([]bean.RoleFilter, 0), Groups: make([]string, 0), LastLoginTime: lastLoginTime, + UserRoleGroup: make([]bean.UserRoleGroup, 0), }) } if len(response) == 0 { @@ -1031,7 +1057,7 @@ func (impl *UserServiceImpl) getAllDetailedUsers(req *bean.ListingRequest) ([]be } var response []bean.UserInfo for _, model := range models { - isSuperAdmin, roleFilters, filterGroups := impl.getUserMetadata(&model) + isSuperAdmin, roleFilters, filterGroups, userRoleGroups := impl.getUserMetadata(&model) lastLoginTime := adapter.GetLastLoginTime(model) for index, roleFilter := range roleFilters { if roleFilter.Entity == "" { @@ -1048,6 +1074,7 @@ func (impl *UserServiceImpl) getAllDetailedUsers(req *bean.ListingRequest) ([]be Groups: filterGroups, SuperAdmin: isSuperAdmin, LastLoginTime: lastLoginTime, + UserRoleGroup: userRoleGroups, }) } if len(response) == 0 { @@ -1064,7 +1091,7 @@ func (impl *UserServiceImpl) GetAllDetailedUsers() ([]bean.UserInfo, error) { } var response []bean.UserInfo for _, model := range models { - isSuperAdmin, roleFilters, filterGroups := impl.getUserMetadata(&model) + isSuperAdmin, roleFilters, filterGroups, _ := impl.getUserMetadata(&model) for index, roleFilter := range roleFilters { if roleFilter.Entity == "" { roleFilters[index].Entity = bean2.ENTITY_APPS @@ -1602,7 +1629,14 @@ func (impl *UserServiceImpl) checkGroupAuth(groupName string, token string, mana return hasAccessToGroup } -func (impl *UserServiceImpl) GetRoleFiltersByGroupNames(groupNames []string) ([]bean.RoleFilter, error) { +func (impl *UserServiceImpl) GetRoleFiltersByUserRoleGroups(userRoleGroups []bean.UserRoleGroup) ([]bean.RoleFilter, error) { + groupNames := make([]string, 0) + for _, userRoleGroup := range userRoleGroups { + groupNames = append(groupNames, userRoleGroup.RoleGroup.Name) + } + if len(groupNames) == 0 { + return nil, nil + } roles, err := impl.roleGroupRepository.GetRolesByGroupNames(groupNames) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting roles by group names", "err", err) @@ -1658,7 +1692,7 @@ func (impl *UserServiceImpl) createOrUpdateUserRolesForOtherEntity(roleFilter be environments := strings.Split(roleFilter.Environment, ",") for _, environment := range environments { for _, entityName := range entityNames { - if managerAuth != nil { + if managerAuth != nil && entity != bean.CHART_GROUP_ENTITY { // check auth only for apps permission, skip for chart group rbacObject := fmt.Sprintf("%s", roleFilter.Team) isValidAuth := managerAuth(casbin2.ResourceUser, token, rbacObject) diff --git a/pkg/auth/user/repository/UserRepository.go b/pkg/auth/user/repository/UserRepository.go index 9335452ec2..3b149b07a4 100644 --- a/pkg/auth/user/repository/UserRepository.go +++ b/pkg/auth/user/repository/UserRepository.go @@ -69,7 +69,7 @@ type UserModel struct { } type UserRoleModel struct { - TableName struct{} `sql:"user_roles"` + TableName struct{} `sql:"user_roles" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` UserId int32 `sql:"user_id,notnull"` RoleId int `sql:"role_id,notnull"` diff --git 
a/pkg/auth/user/repository/helper/UserRepositoryQueryBuilder.go b/pkg/auth/user/repository/helper/UserRepositoryQueryBuilder.go index c454a3964e..8b9a148c98 100644 --- a/pkg/auth/user/repository/helper/UserRepositoryQueryBuilder.go +++ b/pkg/auth/user/repository/helper/UserRepositoryQueryBuilder.go @@ -18,7 +18,11 @@ func GetQueryForUserListingWithFilters(req *bean.ListingRequest) string { if len(req.SortBy) > 0 && !req.CountCheck { orderCondition += fmt.Sprintf("order by %s ", req.SortBy) - if req.SortOrder == bean2.Desc { + // Handling it for last login as it is time and show order differs on UI. + if req.SortBy == bean2.LastLogin && req.SortOrder == bean2.Asc { + orderCondition += string(bean2.Desc) + } + if req.SortBy == bean2.Email && req.SortOrder == bean2.Desc { orderCondition += string(req.SortOrder) } } diff --git a/scripts/sql/228_user_role_timeout.down.sql b/scripts/sql/228_user_role_timeout.down.sql new file mode 100644 index 0000000000..08ad68a71f --- /dev/null +++ b/scripts/sql/228_user_role_timeout.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE user_roles DROP CONSTRAINT user_roles_timeout_window_configuration_id_fkey; + +ALTER TABLE user_roles DROP COLUMN timeout_window_configuration_id; \ No newline at end of file diff --git a/scripts/sql/228_user_role_timeout.up.sql b/scripts/sql/228_user_role_timeout.up.sql new file mode 100644 index 0000000000..2d041706ac --- /dev/null +++ b/scripts/sql/228_user_role_timeout.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE "public"."user_roles" + ADD COLUMN "timeout_window_configuration_id" int; + +ALTER TABLE "public"."user_roles" ADD FOREIGN KEY ("timeout_window_configuration_id") + REFERENCES "public"."timeout_window_configuration" ("id"); \ No newline at end of file diff --git a/specs/user_policy.yaml b/specs/user_policy.yaml index 555a71017d..7c8691acb5 100644 --- a/specs/user_policy.yaml +++ b/specs/user_policy.yaml @@ -9,7 +9,7 @@ paths: get: summary: Returns all users description: all the template users - operationId: findAllUsers + operationId: GetAllV2 parameters: - name: searchKey @@ -186,9 +186,7 @@ paths: content: application/json: schema: - type: object - items: - $ref: '#/components/schemas/User' + $ref: '#/components/schemas/User' default: description: unexpected error content: @@ -233,10 +231,10 @@ components: email_id: type: string description: Unique valid email-id of user, comma separated emails ids for multiple users - groups: + userRoleGroups: type: array items: - type: string + $ref: '#/components/schemas/UserRoleGroupItem' roleFilters: type: array items: @@ -247,7 +245,7 @@ components: properties: users: items: - $ref: '#/components/schemas/AllUsers' + $ref: '#/components/schemas/AllUsersV2' description: role filters objects totalCount: type: integer @@ -277,6 +275,25 @@ components: type: string format: date-time description: user last login time + AllUsersV2: + type: object + required: + - email_id + properties: + id: + type: integer + description: Unique id of user + email_id: + type: string + description: Unique valid email-id of user, comma separated emails ids for multiple users + userRoleGroups: + type: array + items: + $ref: '#/components/schemas/UserRoleGroupItem' + lastLogin: + type: string + format: date-time + description: user last login time emptyRoleFilter: type: object @@ -354,8 +371,24 @@ components: showAll: type: boolean description: Show all listings - - + UserRoleGroupItem: + type: object + properties: + roleGroup: + $ref: '#/components/schemas/RoleGroup' + RoleGroup: + type: object + properties: + id: + type: 
integer + format: int32 + description: The ID of the role group + name: + type: string + description: The name of the role group + description: + type: string + description: The description of the role group Error: required: From d68d034cc6e9321b880b952374e95f9e8e3309e7 Mon Sep 17 00:00:00 2001 From: Prakash Date: Mon, 18 Mar 2024 19:02:46 +0530 Subject: [PATCH 03/29] fix: extra check added for mono-repo migraiton (#4764) * extra check added for mono-repo migraiton * comparison of repoNameWithoutPrefix and appstore name for mono repo condition * refactor * remove prefix * fix * remove-gitops-prefix-oss * refactor * refactor * refactor --- .../installedApp/service/AppStoreDeploymentService.go | 10 ++++++---- util/GlobalConfig.go | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentService.go index 6dee8576b9..6b1b2d76a3 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentService.go @@ -55,6 +55,7 @@ import ( "go.opentelemetry.io/otel" "go.uber.org/zap" "net/http" + "regexp" "strings" "time" ) @@ -1066,10 +1067,11 @@ func (impl *AppStoreDeploymentServiceImpl) CheckIfMonoRepoMigrationRequired(inst return false } } - //here will set new git repo name if required to migrate - newGitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoName(installedApp.App.AppName) - //checking weather git repo migration needed or not, if existing git repo and new independent git repo is not same than go ahead with migration - if newGitOpsRepoName != gitOpsRepoName { + appNameGitOpsRepoPattern := installedApp.App.AppName + "$" + regex := regexp.MustCompile(appNameGitOpsRepoPattern) + + // if appName is not in the gitOpsRepoName consider it as mono repo + if !regex.MatchString(gitOpsRepoName) { monoRepoMigrationRequired = true } return monoRepoMigrationRequired diff --git a/util/GlobalConfig.go b/util/GlobalConfig.go index 84516c9c82..954a0e99dd 100644 --- a/util/GlobalConfig.go +++ b/util/GlobalConfig.go @@ -7,7 +7,7 @@ import ( ) type GlobalEnvVariables struct { - GitOpsRepoPrefix string `env:"GITOPS_REPO_PREFIX" envDefault:"devtron"` + GitOpsRepoPrefix string `env:"GITOPS_REPO_PREFIX" envDefault:""` EnableAsyncInstallDevtronChart bool `env:"ENABLE_ASYNC_INSTALL_DEVTRON_CHART" envDefault:"false"` ExposeCiMetrics bool `env:"EXPOSE_CI_METRICS" envDefault:"false"` } From ef2bc3921594e3923a50f3b6731bef7f46854160 Mon Sep 17 00:00:00 2001 From: Raunit Verma <155707586+raunit-verma@users.noreply.github.com> Date: Tue, 19 Mar 2024 18:39:21 +0530 Subject: [PATCH 04/29] added request method in audit logger (#4817) --- api/util/logger.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/api/util/logger.go b/api/util/logger.go index bdeeaf742c..996cde92b8 100644 --- a/api/util/logger.go +++ b/api/util/logger.go @@ -18,6 +18,7 @@ type AuditLoggerDTO struct { QueryParams string `json:"queryParams"` ApiResponseCode int `json:"apiResponseCode"` RequestPayload []byte `json:"requestPayload"` + RequestMethod string `json:"requestMethod"` } type LoggingMiddlewareImpl struct { @@ -61,6 +62,7 @@ func (impl LoggingMiddlewareImpl) LoggingMiddleware(next http.Handler) http.Hand UpdatedOn: time.Now(), QueryParams: r.URL.Query().Encode(), RequestPayload: bodyBuffer.Bytes(), + RequestMethod: r.Method, } // Call the next handler in the chain. 
next.ServeHTTP(d, r) @@ -71,5 +73,5 @@ func (impl LoggingMiddlewareImpl) LoggingMiddleware(next http.Handler) http.Hand } func LogRequest(auditLogDto *AuditLoggerDTO) { - log.Printf("AUDIT_LOG: urlPath: %s, queryParams: %s,updatedBy: %s, updatedOn: %s, apiResponseCode: %d,requestPayload: %s", auditLogDto.UrlPath, auditLogDto.QueryParams, auditLogDto.UserEmail, auditLogDto.UpdatedOn, auditLogDto.ApiResponseCode, auditLogDto.RequestPayload) + log.Printf("AUDIT_LOG: requestMethod: %s, urlPath: %s, queryParams: %s, updatedBy: %s, updatedOn: %s, apiResponseCode: %d, requestPayload: %s", auditLogDto.RequestMethod, auditLogDto.UrlPath, auditLogDto.QueryParams, auditLogDto.UserEmail, auditLogDto.UpdatedOn, auditLogDto.ApiResponseCode, auditLogDto.RequestPayload) } From 4c59576afa413652548645c487dc3594f936ef1e Mon Sep 17 00:00:00 2001 From: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:50:58 +0530 Subject: [PATCH 05/29] fix: rolefilters correction with all applications and particular application selected. (#4820) * merging issue resolved * all namespace, cluster, kind remove as it is used in key * removing unnecessary code --- pkg/auth/user/UserCommonService.go | 8 ++++++-- pkg/auth/user/bean/bean.go | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/auth/user/UserCommonService.go b/pkg/auth/user/UserCommonService.go index 1091f29052..50443896be 100644 --- a/pkg/auth/user/UserCommonService.go +++ b/pkg/auth/user/UserCommonService.go @@ -638,7 +638,9 @@ func (impl UserCommonServiceImpl) BuildRoleFilterKeyForJobs(roleFilterMap map[st roleFilterMap[key].Environment = fmt.Sprintf("%s,%s", roleFilterMap[key].Environment, role.Environment) } entityArr := strings.Split(roleFilterMap[key].EntityName, ",") - if !containsArr(entityArr, role.EntityName) { + if containsArr(entityArr, bean2.EmptyStringIndicatingAll) { + roleFilterMap[key].EntityName = bean2.EmptyStringIndicatingAll + } else if !containsArr(entityArr, role.EntityName) { roleFilterMap[key].EntityName = fmt.Sprintf("%s,%s", roleFilterMap[key].EntityName, role.EntityName) } workflowArr := strings.Split(roleFilterMap[key].Workflow, ",") @@ -657,7 +659,9 @@ func (impl UserCommonServiceImpl) BuildRoleFilterKeyForOtherEntity(roleFilterMap roleFilterMap[key].Environment = fmt.Sprintf("%s,%s", roleFilterMap[key].Environment, role.Environment) } entityArr := strings.Split(roleFilterMap[key].EntityName, ",") - if !containsArr(entityArr, role.EntityName) { + if containsArr(entityArr, bean2.EmptyStringIndicatingAll) { + roleFilterMap[key].EntityName = bean2.EmptyStringIndicatingAll + } else if !containsArr(entityArr, role.EntityName) { roleFilterMap[key].EntityName = fmt.Sprintf("%s,%s", roleFilterMap[key].EntityName, role.EntityName) } } diff --git a/pkg/auth/user/bean/bean.go b/pkg/auth/user/bean/bean.go index fbc7795952..81212270e6 100644 --- a/pkg/auth/user/bean/bean.go +++ b/pkg/auth/user/bean/bean.go @@ -33,6 +33,7 @@ const ( EMPTY_ROLEFILTER_ENTRY_PLACEHOLDER = "NONE" RoleNotFoundStatusPrefix = "role not fount for any given filter: " EntityJobs = "jobs" + EmptyStringIndicatingAll = "" ) const ( From 3cbf2b18f2f0008141a84930b6eb58901718ef06 Mon Sep 17 00:00:00 2001 From: Prakash Date: Wed, 20 Mar 2024 16:41:55 +0530 Subject: [PATCH 06/29] fix: 5xx 4.0 iter (#4620) * bugs revert * 4th iter code changes * delete dag exec * initial poc (ongoing) of handling grpc errors * grpc error handling at install helm chart and template services * refactoring * refactor * code review changes with some 
refactoring * removed unused files * refactor --- api/helm-app/service/HelmAppService.go | 17 ++++---- internal/errors/bean.go | 19 +++++++++ internal/util/ErrorUtil.go | 11 +++-- .../deployment/InstalledAppArgoCdService.go | 6 +++ .../trigger/devtronApps/TriggerService.go | 14 +++++-- pkg/errors/utils.go | 41 +++++++++++++++++++ .../DeployementTemplateService.go | 5 +++ pkg/workflow/cd/CdWorkflowCommonService.go | 4 +- pkg/workflow/dag/WorkflowDagExecutor.go | 6 +-- 9 files changed, 104 insertions(+), 19 deletions(-) create mode 100644 internal/errors/bean.go create mode 100644 pkg/errors/utils.go diff --git a/api/helm-app/service/HelmAppService.go b/api/helm-app/service/HelmAppService.go index d0efce9c0f..f6f19732ce 100644 --- a/api/helm-app/service/HelmAppService.go +++ b/api/helm-app/service/HelmAppService.go @@ -11,7 +11,6 @@ import ( "github.com/devtron-labs/devtron/internal/constants" repository2 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/go-pg/pg" - "google.golang.org/grpc/codes" "net/http" "reflect" "strconv" @@ -389,7 +388,7 @@ func (impl *HelmAppServiceImpl) GetDeploymentHistory(ctx context.Context, app *A ReleaseName: app.ReleaseName, } history, err := impl.helmAppClient.GetDeploymentHistory(ctx, req) - if util.GetGRPCErrorDetailedMessage(err) == bean.ErrReleaseNotFound { + if util.GetClientErrorDetailedMessage(err) == bean.ErrReleaseNotFound { err = &util.ApiError{ Code: constants.HelmReleaseNotFound, InternalMessage: bean.ErrReleaseNotFound, @@ -552,16 +551,16 @@ func (impl *HelmAppServiceImpl) DeleteApplication(ctx context.Context, app *AppI deleteApplicationResponse, err := impl.helmAppClient.DeleteApplication(ctx, req) if err != nil { - code, message := util.GetGRPCDetailedError(err) - if code == codes.NotFound { + code, message := util.GetClientDetailedError(err) + if code.IsNotFoundCode() { return nil, &util.ApiError{ - Code: "404", - HttpStatusCode: 200, + Code: strconv.Itoa(http.StatusNotFound), + HttpStatusCode: 200, //need to revisit the status code UserMessage: message, } } impl.logger.Errorw("error in deleting helm application", "err", err) - return nil, errors.New(util.GetGRPCErrorDetailedMessage(err)) + return nil, errors.New(util.GetClientErrorDetailedMessage(err)) } response := &openapi.UninstallReleaseResponse{ @@ -962,6 +961,10 @@ func (impl *HelmAppServiceImpl) TemplateChart(ctx context.Context, templateChart templateChartResponse, err := impl.helmAppClient.TemplateChart(ctx, installReleaseRequest) if err != nil { impl.logger.Errorw("error in templating chart", "err", err) + clientErrCode, errMsg := util.GetClientDetailedError(err) + if clientErrCode.IsInvalidArgumentCode() { + return nil, &util.ApiError{HttpStatusCode: http.StatusConflict, Code: strconv.Itoa(http.StatusConflict), InternalMessage: errMsg, UserMessage: errMsg} + } return nil, err } diff --git a/internal/errors/bean.go b/internal/errors/bean.go new file mode 100644 index 0000000000..27b2f46b06 --- /dev/null +++ b/internal/errors/bean.go @@ -0,0 +1,19 @@ +package errors + +import "google.golang.org/grpc/codes" + +type ClientStatusCode struct { + Code codes.Code +} + +func (r *ClientStatusCode) IsInvalidArgumentCode() bool { + return r.Code == codes.InvalidArgument +} + +func (r *ClientStatusCode) IsNotFoundCode() bool { + return r.Code == codes.NotFound +} + +func (r *ClientStatusCode) IsFailedPreconditionCode() bool { + return r.Code == codes.FailedPrecondition +} diff --git a/internal/util/ErrorUtil.go b/internal/util/ErrorUtil.go index 
52df683fc6..6e98e2baf0 100644 --- a/internal/util/ErrorUtil.go +++ b/internal/util/ErrorUtil.go @@ -19,6 +19,7 @@ package util import ( "fmt" + "github.com/devtron-labs/devtron/internal/errors" "github.com/go-pg/pg" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -50,16 +51,18 @@ func IsErrNoRows(err error) bool { return pg.ErrNoRows == err } -func GetGRPCErrorDetailedMessage(err error) string { +func GetClientErrorDetailedMessage(err error) string { if errStatus, ok := status.FromError(err); ok { return errStatus.Message() } return err.Error() } -func GetGRPCDetailedError(err error) (codes.Code, string) { +func GetClientDetailedError(err error) (*errors.ClientStatusCode, string) { + grpcCode := &errors.ClientStatusCode{Code: codes.Unknown} if errStatus, ok := status.FromError(err); ok { - return errStatus.Code(), errStatus.Message() + grpcCode.Code = errStatus.Code() + return grpcCode, errStatus.Message() } - return codes.Unknown, err.Error() + return grpcCode, err.Error() } diff --git a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go index 26e3e9e31a..1941bbedc8 100644 --- a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go +++ b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppArgoCdService.go @@ -13,6 +13,8 @@ import ( repository5 "github.com/devtron-labs/devtron/pkg/cluster/repository" commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/go-pg/pg" + "net/http" + "strconv" "strings" "time" ) @@ -108,6 +110,10 @@ func (impl *FullModeDeploymentServiceImpl) UpdateAndSyncACDApps(installAppVersio err = impl.argoClientWrapperService.SyncArgoCDApplicationIfNeededAndRefresh(ctx, acdAppName) if err != nil { impl.Logger.Errorw("error in getting argocd application with normal refresh", "err", err, "argoAppName", installAppVersionRequest.ACDAppName) + clientErrCode, errMsg := util.GetClientDetailedError(err) + if clientErrCode.IsFailedPreconditionCode() { + return &util.ApiError{HttpStatusCode: http.StatusPreconditionFailed, Code: strconv.Itoa(http.StatusPreconditionFailed), InternalMessage: errMsg, UserMessage: errMsg} + } return err } if !impl.acdConfig.ArgoCDAutoSyncEnabled { diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index 1480c2e179..995659f69f 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -38,6 +38,7 @@ import ( bean5 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/helper" + clientErrors "github.com/devtron-labs/devtron/pkg/errors" "github.com/devtron-labs/devtron/pkg/eventProcessor/out" "github.com/devtron-labs/devtron/pkg/imageDigestPolicy" "github.com/devtron-labs/devtron/pkg/pipeline" @@ -992,7 +993,11 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(overrideRequest *bean updateApplicationResponse, err := impl.helmAppClient.UpdateApplication(ctx, req) if err != nil { impl.logger.Errorw("error in updating helm application for cd pipeline", "err", err) - if util.GetGRPCErrorDetailedMessage(err) == context.Canceled.Error() { + apiError := clientErrors.ConvertToApiError(err) + if apiError != nil { + return false, 
apiError + } + if util.GetClientErrorDetailedMessage(err) == context.Canceled.Error() { err = errors.New(pipelineConfig.NEW_DEPLOYMENT_INITIATED) } return false, err @@ -1009,7 +1014,7 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(overrideRequest *bean impl.logger.Errorw("error in helm install custom chart", "err", err) return false, err } - if util.GetGRPCErrorDetailedMessage(err) == context.Canceled.Error() { + if util.GetClientErrorDetailedMessage(err) == context.Canceled.Error() { err = errors.New(pipelineConfig.NEW_DEPLOYMENT_INITIATED) } @@ -1020,10 +1025,13 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(overrideRequest *bean if err != nil { impl.logger.Errorw("error in helm install custom chart", "err", err) - if pgErr != nil { impl.logger.Errorw("failed to update deployment app created flag in pipeline table", "err", err) } + apiError := clientErrors.ConvertToApiError(err) + if apiError != nil { + return false, apiError + } return false, err } diff --git a/pkg/errors/utils.go b/pkg/errors/utils.go new file mode 100644 index 0000000000..9c403f1325 --- /dev/null +++ b/pkg/errors/utils.go @@ -0,0 +1,41 @@ +package errors + +import ( + util2 "github.com/devtron-labs/devtron/internal/util" + "net/http" + "strconv" + "strings" +) + +// extract out this custom error messages in kubelink and send custom error messages from kubelink +const ( + ClusterUnreachableErrorMsg = "cluster unreachable" + CrdPreconditionErrorMsg = "ensure CRDs are installed first" + ArrayStringMismatchErrorMsg = "got array expected string" + NamespaceNotFoundErrorMsg = "namespace not found" + InvalidValueErrorMsg = "invalid value in manifest" + OperationInProgressErrorMsg = "another operation (install/upgrade/rollback) is in progress" +) + +var errorHttpStatusCodeMap = map[string]int{ + ClusterUnreachableErrorMsg: http.StatusUnprocessableEntity, + CrdPreconditionErrorMsg: http.StatusPreconditionFailed, + NamespaceNotFoundErrorMsg: http.StatusConflict, + ArrayStringMismatchErrorMsg: http.StatusFailedDependency, + InvalidValueErrorMsg: http.StatusFailedDependency, + OperationInProgressErrorMsg: http.StatusConflict, +} + +func ConvertToApiError(err error) *util2.ApiError { + for errMsg, statusCode := range errorHttpStatusCodeMap { + if strings.Contains(err.Error(), errMsg) { + return &util2.ApiError{ + InternalMessage: err.Error(), + UserMessage: err.Error(), + HttpStatusCode: statusCode, + Code: strconv.Itoa(statusCode), + } + } + } + return nil +} diff --git a/pkg/generateManifest/DeployementTemplateService.go b/pkg/generateManifest/DeployementTemplateService.go index 4e4db12471..d5988425de 100644 --- a/pkg/generateManifest/DeployementTemplateService.go +++ b/pkg/generateManifest/DeployementTemplateService.go @@ -24,6 +24,7 @@ import ( util2 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" + "net/http" "os" "strconv" "time" @@ -364,6 +365,10 @@ func (impl DeploymentTemplateServiceImpl) GenerateManifest(ctx context.Context, templateChartResponse, err := impl.helmAppClient.TemplateChart(ctx, installReleaseRequest) if err != nil { impl.Logger.Errorw("error in templating chart", "err", err) + clientErrCode, errMsg := util.GetClientDetailedError(err) + if clientErrCode.IsInvalidArgumentCode() { + return nil, &util.ApiError{HttpStatusCode: http.StatusConflict, Code: strconv.Itoa(http.StatusConflict), InternalMessage: errMsg, UserMessage: errMsg} + } return nil, err } response := &openapi2.TemplateChartResponse{ diff --git 
a/pkg/workflow/cd/CdWorkflowCommonService.go b/pkg/workflow/cd/CdWorkflowCommonService.go index 65a91d236d..73230584c4 100644 --- a/pkg/workflow/cd/CdWorkflowCommonService.go +++ b/pkg/workflow/cd/CdWorkflowCommonService.go @@ -140,7 +140,7 @@ func (impl *CdWorkflowCommonServiceImpl) MarkCurrentDeploymentFailed(runner *pip //update current WF with error status impl.logger.Errorw("error in triggering cd WF, setting wf status as fail ", "wfId", runner.Id, "err", releaseErr) runner.Status = pipelineConfig.WorkflowFailed - runner.Message = util.GetGRPCErrorDetailedMessage(releaseErr) + runner.Message = util.GetClientErrorDetailedMessage(releaseErr) runner.FinishedOn = time.Now() runner.UpdatedOn = time.Now() runner.UpdatedBy = triggeredBy @@ -239,7 +239,7 @@ func (impl *CdWorkflowCommonServiceImpl) UpdateCDWorkflowRunnerStatus(ctx contex } func extractTimelineFailedStatusDetails(err error) string { - errorString := util.GetGRPCErrorDetailedMessage(err) + errorString := util.GetClientErrorDetailedMessage(err) switch errorString { case pipelineConfig.FOUND_VULNERABILITY: return pipelineConfig.TIMELINE_DESCRIPTION_VULNERABLE_IMAGE diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index 2ec92a7602..e08759c094 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -278,14 +278,14 @@ func (impl *WorkflowDagExecutorImpl) UpdateWorkflowRunnerStatusForDeployment(app if err != nil { impl.logger.Errorw("error in getting helm app release status", "appIdentifier", appIdentifier, "err", err) // Handle release not found errors - if skipReleaseNotFound && util.GetGRPCErrorDetailedMessage(err) != bean6.ErrReleaseNotFound { + if skipReleaseNotFound && util.GetClientErrorDetailedMessage(err) != bean6.ErrReleaseNotFound { // skip this error and continue for next workflow status impl.logger.Warnw("found error, skipping helm apps status update for this trigger", "appIdentifier", appIdentifier, "err", err) return false } // If release not found, mark the deployment as failure wfr.Status = pipelineConfig.WorkflowFailed - wfr.Message = util.GetGRPCErrorDetailedMessage(err) + wfr.Message = util.GetClientErrorDetailedMessage(err) wfr.FinishedOn = time.Now() return true } @@ -325,7 +325,7 @@ func (impl *WorkflowDagExecutorImpl) UpdateWorkflowRunnerStatusForDeployment(app } func (impl *WorkflowDagExecutorImpl) handleAsyncTriggerReleaseError(releaseErr error, cdWfr *pipelineConfig.CdWorkflowRunner, overrideRequest *bean.ValuesOverrideRequest, appIdentifier *client2.AppIdentifier) { - releaseErrString := util.GetGRPCErrorDetailedMessage(releaseErr) + releaseErrString := util.GetClientErrorDetailedMessage(releaseErr) switch releaseErrString { case context.DeadlineExceeded.Error(): // if context deadline is exceeded fetch release status and UpdateWorkflowRunnerStatusForDeployment From dc49da4409e2e710dc782430219786e9d91f8e06 Mon Sep 17 00:00:00 2001 From: kartik-579 <84493919+kartik-579@users.noreply.github.com> Date: Wed, 20 Mar 2024 16:59:01 +0530 Subject: [PATCH 07/29] chore: refactoring v4 (#4775) * removed registerInArgo multiple impls * extracted app metrics code * migrated envLevel app metrics code to new service * chore: Removed unused jira and migration integration (#4498) * removed unsued jira integration * removed test-suite-code * db migration conf removal * chore: removed unused injection * chore: removed dead code * added: migration script --------- Co-authored-by: Ash-exp * chore: App store dead code cleanup and 
restructuring (#4497) * moved chart-group in seperate code * removed unused dependency * removed dead code * extracted resource tree * moved notes * resource movement * removed unused code * removed unused dependency * commit methods * extracted status update * chore: clean up unused dead code * updated: EA mode docker file * updated: migration number --------- Co-authored-by: Ash-exp * chart ref refactoring * removed infra metrics db calls * moved app metrics repositories from /internal to /pkg * moved: const and types to bean * removed: unused const * review comments * migrated some methods from chartService to chartRefService * added dt validation service interface * minor refactoring * moved validation method - 1 * wip * removed redundant appMetrics req obj * moved app metrics bindings to wireset * removed multiple dead code * remove redundant dependency * moved ChartGroup router and rest handler to respective folder * stage 1 * gitOps refactoring * moved gitClient code to a common wrapper service * chore: AppStoreDeployment Install flow refactoring * review changes * wip * fix for unsupported charts * refactoring: App Store deployment services * minor cleanup * renamed remote package to git * renamed gitOpsRemoteOpService If and impl * migrated usages of gitService to gitOperationService * shifted git service and all gitOps clients to pkg * gitops repository usages refactor * refactored gitOpsRepository usages * gitlab client creation refactoring * renamed util/ChartService * reverted renaming changes * reverted renaming changes * reverted renaming changes * wip * wip * removed typo * changes * changes * extracted trigger cd, nats subscriptions from wfDAGExec service * removed gitOpsRepoName fetch logic duplicacy * minor change for cd trigger method * removed redundant imports * extracted deployment bulk trigger publish event logic from workflowDag * extracted manifest creation code from WorkflowDagExecutor * moved WorkflowStatusUpdateHandler * removed old refactored code * wip - extracted k8s op method from workflowDAG part 1 * extracted artifact logic from workflowDAG * extracted artifact logic from workflowDAG * refactoring * replaced slices -> k8s.io/utils/strings/slices import * replaced slices -> k8s.io/utils/strings/slices import * fix prod bug * renamed PrePostStageTriggerService -> preStageTriggerService * fix for rollback * wip * refactoring pre & post stage service * updated PreCdTriggerService * migrated AsyncTrigger consumer to eventProcessor * review comments * removed whitespaces * migrated ci material topic to processor service * migrated argo app status subsciption to common processor pkg * migrated argo type pipeline publish and process to common pkg * migrated appstore bulk deploy topic * migrated cd bulk deploy topic * migrated appstore helm install status topic * migrated git webhook event publish * minor changes in manifest creation service * minor changes in cd trigger service * minor changes in cd trigger service * wip * minor change in async helm install req handling * wip * updated common lib version(synced with main) * safety check for concurrency in pipeline delete and asyn trigger * updated common-lib version * updated common-lib version to main 5807b130153800727ace993e98b24cb27b8fc1fa --------- Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> Co-authored-by: Ash-exp Co-authored-by: nishant --- App.go | 4 - Wire.go | 9 - api/restHandler/ExternalCiRestHandler.go | 2 +- api/restHandler/WebhookEventHandler.go | 29 +- 
.../trigger/PipelineTriggerRestHandler.go | 5 +- api/router/pubsub/GitWebhookHandler.go | 88 ---- api/router/router.go | 12 +- .../cron/CdApplicationStatusUpdateHandler.go | 75 +-- go.mod | 2 +- go.sum | 4 +- .../pipelineConfig/CdWorfkflowRepository.go | 4 +- .../pipelineConfig/PipelineRepository.go | 12 + pkg/appStore/bean/bean.go | 5 - pkg/appStore/chartGroup/ChartGroupService.go | 85 +--- .../FullMode/InstalledAppDBExtendedService.go | 63 +-- ...InstalledAppDeploymentTypeChangeService.go | 37 +- pkg/bean/app.go | 17 - pkg/bulkAction/BulkUpdateService.go | 135 +---- .../manifest/ManifestCreationService.go | 468 +++++++----------- pkg/deployment/manifest/helper/helper.go | 155 ++++++ .../devtronApps/PostStageTriggerService.go | 60 +-- .../devtronApps/PreStageTriggerService.go | 200 +++++--- .../trigger/devtronApps/TriggerService.go | 37 +- .../trigger/devtronApps/adapter/adapter.go | 17 + .../trigger/devtronApps/bean/bean.go | 15 +- .../trigger/devtronApps/helper/helper.go | 5 + .../CentralEventProcessorService.go | 95 +++- .../bean/appStoreAppsEventBean.go | 6 + .../bean/cdPipelineEventBean.go | 8 + .../bean/deployedApplicationEventBean.go | 18 + pkg/eventProcessor/bean/workflowEventBean.go | 6 + .../in/AppStoreAppsEventProcessorService.go | 113 +++++ .../in/CDPipelineEventProcessorService.go | 149 ++++++ .../in/CIPipelineEventProcessorService.go | 61 +++ ...eployedApplicationEventProcessorService.go | 125 ++--- .../in/WorkflowEventProcessorService.go | 282 ++++++++++- .../in/wire_eventProcessorIn.go | 4 + .../out/AppStoreAppsEventPublishService.go | 48 ++ .../out/CDPipelineEventPublishService.go | 78 +++ .../out/CIPipelineEventPublishService.go | 44 ++ .../out/PipelineConfigEventPublishService.go | 46 ++ pkg/eventProcessor/out/bean/bean.go | 15 +- .../out/wire_eventProcessorOut.go | 12 + .../DeployementTemplateService.go | 7 - .../DeploymentPipelineConfigService.go | 193 ++++---- pkg/pipeline/PipelineConfigServiceListener.go | 38 -- pkg/workflow/cd/CdWorkflowRunnerService.go | 11 + pkg/workflow/dag/WorkflowDagExecutor.go | 221 +-------- pkg/workflow/status/WorkflowStatusService.go | 23 +- .../common-lib/pubsub-lib/JetStreamUtil.go | 25 +- vendor/modules.txt | 2 +- wire_gen.go | 82 ++- 52 files changed, 1802 insertions(+), 1455 deletions(-) delete mode 100644 api/router/pubsub/GitWebhookHandler.go create mode 100644 pkg/deployment/manifest/helper/helper.go create mode 100644 pkg/deployment/trigger/devtronApps/adapter/adapter.go create mode 100644 pkg/eventProcessor/bean/appStoreAppsEventBean.go create mode 100644 pkg/eventProcessor/bean/cdPipelineEventBean.go create mode 100644 pkg/eventProcessor/bean/deployedApplicationEventBean.go create mode 100644 pkg/eventProcessor/in/AppStoreAppsEventProcessorService.go create mode 100644 pkg/eventProcessor/in/CDPipelineEventProcessorService.go create mode 100644 pkg/eventProcessor/in/CIPipelineEventProcessorService.go rename api/router/pubsub/ApplicationStatusHandler.go => pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go (76%) create mode 100644 pkg/eventProcessor/out/AppStoreAppsEventPublishService.go create mode 100644 pkg/eventProcessor/out/CDPipelineEventPublishService.go create mode 100644 pkg/eventProcessor/out/CIPipelineEventPublishService.go create mode 100644 pkg/eventProcessor/out/PipelineConfigEventPublishService.go delete mode 100644 pkg/pipeline/PipelineConfigServiceListener.go diff --git a/App.go b/App.go index 5132d9c7a5..10e470aec0 100644 --- a/App.go +++ b/App.go @@ -35,7 +35,6 @@ import ( 
"github.com/casbin/casbin" authMiddleware "github.com/devtron-labs/authenticator/middleware" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" "github.com/devtron-labs/devtron/api/router" "github.com/devtron-labs/devtron/api/sse" "github.com/devtron-labs/devtron/internal/middleware" @@ -52,7 +51,6 @@ type App struct { Enforcer *casbin.SyncedEnforcer server *http.Server db *pg.DB - pubsubClient *pubsub.PubSubClientServiceImpl posthogClient *telemetry.PosthogClient centralEventProcessor *eventProcessor.CentralEventProcessor // used for local dev only @@ -67,7 +65,6 @@ func NewApp(router *router.MuxRouter, sse *sse.SSE, enforcer *casbin.SyncedEnforcer, db *pg.DB, - pubsubClient *pubsub.PubSubClientServiceImpl, sessionManager2 *authMiddleware.SessionManager, posthogClient *telemetry.PosthogClient, loggingMiddleware util.LoggingMiddleware, @@ -81,7 +78,6 @@ func NewApp(router *router.MuxRouter, SSE: sse, Enforcer: enforcer, db: db, - pubsubClient: pubsubClient, serveTls: false, sessionManager2: sessionManager2, posthogClient: posthogClient, diff --git a/Wire.go b/Wire.go index fe85f8a3ad..37091e7e83 100644 --- a/Wire.go +++ b/Wire.go @@ -66,7 +66,6 @@ import ( status3 "github.com/devtron-labs/devtron/api/router/app/pipeline/status" trigger2 "github.com/devtron-labs/devtron/api/router/app/pipeline/trigger" workflow2 "github.com/devtron-labs/devtron/api/router/app/workflow" - "github.com/devtron-labs/devtron/api/router/pubsub" "github.com/devtron-labs/devtron/api/server" "github.com/devtron-labs/devtron/api/sse" "github.com/devtron-labs/devtron/api/team" @@ -491,12 +490,6 @@ func InitializeApp() (*App, error) { pubsub1.NewPubSubClientServiceImpl, - pubsub.NewGitWebhookHandler, - wire.Bind(new(pubsub.GitWebhookHandler), new(*pubsub.GitWebhookHandlerImpl)), - - pubsub.NewApplicationStatusHandlerImpl, - wire.Bind(new(pubsub.ApplicationStatusHandler), new(*pubsub.ApplicationStatusHandlerImpl)), - rbac.NewEnforcerUtilImpl, wire.Bind(new(rbac.EnforcerUtil), new(*rbac.EnforcerUtilImpl)), @@ -966,8 +959,6 @@ func InitializeApp() (*App, error) { pipeline.NewPluginInputVariableParserImpl, wire.Bind(new(pipeline.PluginInputVariableParser), new(*pipeline.PluginInputVariableParserImpl)), - pipeline.NewPipelineConfigListenerServiceImpl, - wire.Bind(new(pipeline.PipelineConfigListenerService), new(*pipeline.PipelineConfigListenerServiceImpl)), cron2.NewCronLoggerImpl, imageDigestPolicy.NewImageDigestPolicyServiceImpl, diff --git a/api/restHandler/ExternalCiRestHandler.go b/api/restHandler/ExternalCiRestHandler.go index f1ab9a2d73..7385d3d6af 100644 --- a/api/restHandler/ExternalCiRestHandler.go +++ b/api/restHandler/ExternalCiRestHandler.go @@ -19,8 +19,8 @@ package restHandler import ( "encoding/json" - util3 "github.com/devtron-labs/devtron/api/util" "github.com/devtron-labs/devtron/pkg/workflow/dag" + util3 "github.com/devtron-labs/devtron/api/util" "net/http" "strconv" diff --git a/api/restHandler/WebhookEventHandler.go b/api/restHandler/WebhookEventHandler.go index 85a8cf2d1d..c608cca8cf 100644 --- a/api/restHandler/WebhookEventHandler.go +++ b/api/restHandler/WebhookEventHandler.go @@ -18,7 +18,7 @@ package restHandler import ( - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out" "io/ioutil" "net/http" "strconv" @@ -36,21 +36,24 @@ type WebhookEventHandler interface { } type WebhookEventHandlerImpl struct { - logger *zap.SugaredLogger - gitHostConfig pipeline.GitHostConfig - eventClient client.EventClient - webhookSecretValidator 
git.WebhookSecretValidator - webhookEventDataConfig pipeline.WebhookEventDataConfig + logger *zap.SugaredLogger + gitHostConfig pipeline.GitHostConfig + eventClient client.EventClient + webhookSecretValidator git.WebhookSecretValidator + webhookEventDataConfig pipeline.WebhookEventDataConfig + ciPipelineEventPublishService out.CIPipelineEventPublishService } func NewWebhookEventHandlerImpl(logger *zap.SugaredLogger, gitHostConfig pipeline.GitHostConfig, eventClient client.EventClient, - webhookSecretValidator git.WebhookSecretValidator, webhookEventDataConfig pipeline.WebhookEventDataConfig) *WebhookEventHandlerImpl { + webhookSecretValidator git.WebhookSecretValidator, webhookEventDataConfig pipeline.WebhookEventDataConfig, + ciPipelineEventPublishService out.CIPipelineEventPublishService) *WebhookEventHandlerImpl { return &WebhookEventHandlerImpl{ - logger: logger, - gitHostConfig: gitHostConfig, - eventClient: eventClient, - webhookSecretValidator: webhookSecretValidator, - webhookEventDataConfig: webhookEventDataConfig, + logger: logger, + gitHostConfig: gitHostConfig, + eventClient: eventClient, + webhookSecretValidator: webhookSecretValidator, + webhookEventDataConfig: webhookEventDataConfig, + ciPipelineEventPublishService: ciPipelineEventPublishService, } } @@ -121,7 +124,7 @@ func (impl WebhookEventHandlerImpl) OnWebhookEvent(w http.ResponseWriter, r *htt } // write event - err = impl.eventClient.WriteNatsEvent(pubsub.WEBHOOK_EVENT_TOPIC, webhookEvent) + err = impl.ciPipelineEventPublishService.PublishGitWebhookEvent(gitHostId, eventType, string(requestBodyBytes)) if err != nil { impl.logger.Errorw("Error while handling webhook in git-sensor", "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go b/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go index a0e89a1822..e18dbb5517 100644 --- a/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go +++ b/api/restHandler/app/pipeline/trigger/PipelineTriggerRestHandler.go @@ -27,7 +27,6 @@ import ( bean3 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" "github.com/devtron-labs/devtron/pkg/eventProcessor/out" bean4 "github.com/devtron-labs/devtron/pkg/eventProcessor/out/bean" - "github.com/devtron-labs/devtron/pkg/workflow/dag" "net/http" "strconv" @@ -65,7 +64,6 @@ type PipelineTriggerRestHandlerImpl struct { enforcer casbin.Enforcer teamService team.TeamService logger *zap.SugaredLogger - workflowDagExecutor dag.WorkflowDagExecutor enforcerUtil rbac.EnforcerUtil deploymentGroupService deploymentGroup.DeploymentGroupService argoUserService argo.ArgoUserService @@ -77,7 +75,7 @@ type PipelineTriggerRestHandlerImpl struct { func NewPipelineRestHandler(appService app.AppService, userAuthService user.UserService, validator *validator.Validate, enforcer casbin.Enforcer, teamService team.TeamService, logger *zap.SugaredLogger, enforcerUtil rbac.EnforcerUtil, - workflowDagExecutor dag.WorkflowDagExecutor, deploymentGroupService deploymentGroup.DeploymentGroupService, + deploymentGroupService deploymentGroup.DeploymentGroupService, argoUserService argo.ArgoUserService, deploymentConfigService pipeline.DeploymentConfigService, deployedAppService deployedApp.DeployedAppService, cdTriggerService devtronApps.TriggerService, @@ -89,7 +87,6 @@ func NewPipelineRestHandler(appService app.AppService, userAuthService user.User enforcer: enforcer, teamService: teamService, logger: logger, - 
workflowDagExecutor: workflowDagExecutor, enforcerUtil: enforcerUtil, deploymentGroupService: deploymentGroupService, argoUserService: argoUserService, diff --git a/api/router/pubsub/GitWebhookHandler.go b/api/router/pubsub/GitWebhookHandler.go deleted file mode 100644 index 32415cdf5c..0000000000 --- a/api/router/pubsub/GitWebhookHandler.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2020 Devtron Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package pubsub - -import ( - "encoding/json" - "github.com/devtron-labs/common-lib/pubsub-lib/model" - - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/devtron/client/gitSensor" - "github.com/devtron-labs/devtron/pkg/git" - - "go.uber.org/zap" -) - -type GitWebhookHandler interface { - Subscribe() error -} - -type GitWebhookHandlerImpl struct { - logger *zap.SugaredLogger - pubsubClient *pubsub.PubSubClientServiceImpl - gitWebhookService git.GitWebhookService -} - -func NewGitWebhookHandler(logger *zap.SugaredLogger, pubsubClient *pubsub.PubSubClientServiceImpl, gitWebhookService git.GitWebhookService) *GitWebhookHandlerImpl { - gitWebhookHandlerImpl := &GitWebhookHandlerImpl{ - logger: logger, - pubsubClient: pubsubClient, - gitWebhookService: gitWebhookService, - } - err := gitWebhookHandlerImpl.Subscribe() - if err != nil { - logger.Error("err", err) - return nil - } - return gitWebhookHandlerImpl -} - -func (impl *GitWebhookHandlerImpl) Subscribe() error { - callback := func(msg *model.PubSubMsg) { - //defer msg.Ack() - ciPipelineMaterial := gitSensor.CiPipelineMaterial{} - err := json.Unmarshal([]byte(string(msg.Data)), &ciPipelineMaterial) - if err != nil { - impl.logger.Error("Error while unmarshalling json response", "error", err) - return - } - resp, err := impl.gitWebhookService.HandleGitWebhook(ciPipelineMaterial) - impl.logger.Debug(resp) - if err != nil { - impl.logger.Error("err", err) - return - } - } - - // add required logging here - var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { - ciPipelineMaterial := gitSensor.CiPipelineMaterial{} - err := json.Unmarshal([]byte(string(msg.Data)), &ciPipelineMaterial) - if err != nil { - return "error while unmarshalling json response", []interface{}{"error", err} - } - return "got message for about new ci material", []interface{}{"ciPipelineMaterialId", ciPipelineMaterial.Id, "gitMaterialId", ciPipelineMaterial.GitMaterialId, "type", ciPipelineMaterial.Type} - } - - err := impl.pubsubClient.Subscribe(pubsub.NEW_CI_MATERIAL_TOPIC, callback, loggerFunc) - if err != nil { - impl.logger.Error("err", err) - return err - } - return nil -} diff --git a/api/router/router.go b/api/router/router.go index 76fd8734e3..34abf114f0 100644 --- a/api/router/router.go +++ b/api/router/router.go @@ -19,7 +19,6 @@ package router import ( "encoding/json" - pubsub2 "github.com/devtron-labs/common-lib/pubsub-lib" "github.com/devtron-labs/devtron/api/apiToken" "github.com/devtron-labs/devtron/api/appStore" 
"github.com/devtron-labs/devtron/api/appStore/chartGroup" @@ -39,7 +38,6 @@ import ( "github.com/devtron-labs/devtron/api/module" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/api/router/app" - "github.com/devtron-labs/devtron/api/router/pubsub" "github.com/devtron-labs/devtron/api/server" "github.com/devtron-labs/devtron/api/team" terminal2 "github.com/devtron-labs/devtron/api/terminal" @@ -69,10 +67,7 @@ type MuxRouter struct { DockerRegRouter DockerRegRouter NotificationRouter NotificationRouter TeamRouter team.TeamRouter - pubsubClient *pubsub2.PubSubClientServiceImpl UserRouter user.UserRouter - gitWebhookHandler pubsub.GitWebhookHandler - appUpdateHandler pubsub.ApplicationStatusHandler ChartRefRouter ChartRefRouter ConfigMapRouter ConfigMapRouter AppStoreRouter appStore.AppStoreRouter @@ -129,9 +124,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, DockerRegRouter DockerRegRouter, NotificationRouter NotificationRouter, TeamRouter team.TeamRouter, - gitWebhookHandler pubsub.GitWebhookHandler, - appUpdateHandler pubsub.ApplicationStatusHandler, - pubsubClient *pubsub2.PubSubClientServiceImpl, UserRouter user.UserRouter, + UserRouter user.UserRouter, ChartRefRouter ChartRefRouter, ConfigMapRouter ConfigMapRouter, AppStoreRouter appStore.AppStoreRouter, chartRepositoryRouter chartRepo.ChartRepositoryRouter, ReleaseMetricsRouter ReleaseMetricsRouter, deploymentGroupRouter DeploymentGroupRouter, batchOperationRouter BatchOperationRouter, chartGroupRouter chartGroup.ChartGroupRouter, imageScanRouter ImageScanRouter, @@ -164,9 +157,6 @@ func NewMuxRouter(logger *zap.SugaredLogger, NotificationRouter: NotificationRouter, TeamRouter: TeamRouter, logger: logger, - gitWebhookHandler: gitWebhookHandler, - appUpdateHandler: appUpdateHandler, - pubsubClient: pubsubClient, UserRouter: UserRouter, ChartRefRouter: ChartRefRouter, ConfigMapRouter: ConfigMapRouter, diff --git a/client/cron/CdApplicationStatusUpdateHandler.go b/client/cron/CdApplicationStatusUpdateHandler.go index 264a922403..fa9e2bfc73 100644 --- a/client/cron/CdApplicationStatusUpdateHandler.go +++ b/client/cron/CdApplicationStatusUpdateHandler.go @@ -1,10 +1,7 @@ package cron import ( - "encoding/json" "fmt" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/common-lib/pubsub-lib/model" "github.com/devtron-labs/devtron/api/bean" client2 "github.com/devtron-labs/devtron/client/events" "github.com/devtron-labs/devtron/internal/middleware" @@ -19,12 +16,10 @@ import ( "github.com/devtron-labs/devtron/pkg/workflow/cd" "github.com/devtron-labs/devtron/pkg/workflow/dag" "github.com/devtron-labs/devtron/pkg/workflow/status" - bean3 "github.com/devtron-labs/devtron/pkg/workflow/status/bean" "github.com/devtron-labs/devtron/util" cron2 "github.com/devtron-labs/devtron/util/cron" "github.com/robfig/cron/v3" "go.uber.org/zap" - "k8s.io/utils/pointer" "strconv" "time" ) @@ -33,7 +28,6 @@ type CdApplicationStatusUpdateHandler interface { HelmApplicationStatusUpdate() ArgoApplicationStatusUpdate() ArgoPipelineTimelineUpdate() - Subscribe() error SyncPipelineStatusForResourceTreeCall(pipeline *pipelineConfig.Pipeline) error SyncPipelineStatusForAppStoreForResourceTreeCall(installedAppVersion *repository2.InstalledAppVersions) error ManualSyncPipelineStatus(appId, envId int, userId int32) error @@ -46,7 +40,6 @@ type CdApplicationStatusUpdateHandlerImpl struct { workflowDagExecutor dag.WorkflowDagExecutor installedAppService EAMode.InstalledAppDBService AppStatusConfig 
*app.AppServiceConfig - pubsubClient *pubsub.PubSubClientServiceImpl pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository eventClient client2.EventClient appListingRepository repository.AppListingRepository @@ -60,7 +53,7 @@ type CdApplicationStatusUpdateHandlerImpl struct { func NewCdApplicationStatusUpdateHandlerImpl(logger *zap.SugaredLogger, appService app.AppService, workflowDagExecutor dag.WorkflowDagExecutor, installedAppService EAMode.InstalledAppDBService, - CdHandler pipeline.CdHandler, AppStatusConfig *app.AppServiceConfig, pubsubClient *pubsub.PubSubClientServiceImpl, + AppStatusConfig *app.AppServiceConfig, pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository, eventClient client2.EventClient, appListingRepository repository.AppListingRepository, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, @@ -79,7 +72,6 @@ func NewCdApplicationStatusUpdateHandlerImpl(logger *zap.SugaredLogger, appServi workflowDagExecutor: workflowDagExecutor, installedAppService: installedAppService, AppStatusConfig: AppStatusConfig, - pubsubClient: pubsubClient, pipelineStatusTimelineRepository: pipelineStatusTimelineRepository, eventClient: eventClient, appListingRepository: appListingRepository, @@ -90,13 +82,7 @@ func NewCdApplicationStatusUpdateHandlerImpl(logger *zap.SugaredLogger, appServi cdWorkflowCommonService: cdWorkflowCommonService, workflowStatusService: workflowStatusService, } - - err := impl.Subscribe() - if err != nil { - logger.Errorw("error on subscribe", "err", err) - return nil - } - _, err = cron.AddFunc(AppStatusConfig.CdHelmPipelineStatusCronTime, impl.HelmApplicationStatusUpdate) + _, err := cron.AddFunc(AppStatusConfig.CdHelmPipelineStatusCronTime, impl.HelmApplicationStatusUpdate) if err != nil { logger.Errorw("error in starting helm application status update cron job", "err", err) return nil @@ -114,63 +100,6 @@ func NewCdApplicationStatusUpdateHandlerImpl(logger *zap.SugaredLogger, appServi return impl } -func (impl *CdApplicationStatusUpdateHandlerImpl) Subscribe() error { - callback := func(msg *model.PubSubMsg) { - statusUpdateEvent := bean3.ArgoPipelineStatusSyncEvent{} - var err error - var cdPipeline *pipelineConfig.Pipeline - var installedApp repository2.InstalledApps - - err = json.Unmarshal([]byte(string(msg.Data)), &statusUpdateEvent) - if err != nil { - impl.logger.Errorw("unmarshal error on argo pipeline status update event", "err", err) - return - } - - if statusUpdateEvent.IsAppStoreApplication { - installedApp, err = impl.installedAppVersionRepository.GetInstalledAppByInstalledAppVersionId(statusUpdateEvent.InstalledAppVersionId) - if err != nil { - impl.logger.Errorw("error in getting installedAppVersion by id", "err", err, "id", statusUpdateEvent.PipelineId) - return - } - } else { - cdPipeline, err = impl.pipelineRepository.FindById(statusUpdateEvent.PipelineId) - if err != nil { - impl.logger.Errorw("error in getting cdPipeline by id", "err", err, "id", statusUpdateEvent.PipelineId) - return - } - } - - triggerContext := bean2.TriggerContext{ - ReferenceId: pointer.String(msg.MsgId), - } - - err, _ = impl.workflowStatusService.UpdatePipelineTimelineAndStatusByLiveApplicationFetch(triggerContext, cdPipeline, installedApp, statusUpdateEvent.UserId) - if err != nil { - impl.logger.Errorw("error on argo pipeline status update", "err", err, "msg", string(msg.Data)) - return - } - } - - // add required logging here - var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, 
[]interface{}) { - statusUpdateEvent := bean3.ArgoPipelineStatusSyncEvent{} - err := json.Unmarshal([]byte(msg.Data), &statusUpdateEvent) - if err != nil { - return "unmarshal error on argo pipeline status update event", []interface{}{"err", err} - } - return "got message for argo pipeline status update", []interface{}{"pipelineId", statusUpdateEvent.PipelineId, "installedAppVersionId", statusUpdateEvent.InstalledAppVersionId, "isAppStoreApplication", statusUpdateEvent.IsAppStoreApplication} - } - - validations := impl.cdWorkflowCommonService.GetTriggerValidateFuncs() - err := impl.pubsubClient.Subscribe(pubsub.ARGO_PIPELINE_STATUS_UPDATE_TOPIC, callback, loggerFunc, validations...) - if err != nil { - impl.logger.Errorw("error in subscribing to argo application status update topic", "err", err) - return err - } - return nil -} - func (impl *CdApplicationStatusUpdateHandlerImpl) HelmApplicationStatusUpdate() { cronProcessStartTime := time.Now() defer func() { diff --git a/go.mod b/go.mod index 83a5ba0529..2cb7645d2a 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 github.com/devtron-labs/authenticator v0.4.35-0.20240216091211-80e10a80ce7b - github.com/devtron-labs/common-lib v0.0.16-0.20240304102639-17132681584e + github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538 github.com/devtron-labs/protos v0.0.3-0.20240130061723-7b2e12ab0abb github.com/evanphx/json-patch v5.6.0+incompatible github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 diff --git a/go.sum b/go.sum index de5efd6898..4eeaa4cbdb 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,8 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADG github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/devtron-labs/authenticator v0.4.35-0.20240216091211-80e10a80ce7b h1:aHKsdB4ghsp+x8167W8MZyF3WQOixTSJFyARrU+qR6s= github.com/devtron-labs/authenticator v0.4.35-0.20240216091211-80e10a80ce7b/go.mod h1:a5gxST+HNmJReXE2TkCicFQFWtlhp8eqBRwS23GydNE= -github.com/devtron-labs/common-lib v0.0.16-0.20240304102639-17132681584e h1:tUOhs588RN0TbWF2u5w4NbR7epqzMZ2/0doBXD80MzY= -github.com/devtron-labs/common-lib v0.0.16-0.20240304102639-17132681584e/go.mod h1:95/DizzVXu1kHap/VwEvdxwgd+BvPVYc0bJzt8yqGDU= +github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538 h1:KG/XRlhT3Mc066fE5qOk02kybqtnWTIsJhsUZ3gzDHc= +github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538/go.mod h1:95/DizzVXu1kHap/VwEvdxwgd+BvPVYc0bJzt8yqGDU= github.com/devtron-labs/protos v0.0.3-0.20240130061723-7b2e12ab0abb h1:CkfQQgZc950/hTPqtQSiHV2RmZgkBLGCzwR02FZYjAU= github.com/devtron-labs/protos v0.0.3-0.20240130061723-7b2e12ab0abb/go.mod h1:pjLjgoa1GzbkOkvbMyP4SAKsaiK7eG6GoQCNauG03JA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 7086bdcf90..23f9229660 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -480,7 +480,7 @@ func (impl *CdWorkflowRepositoryImpl) FindLatestWfrByAppIdAndEnvironmentId(appId func (impl *CdWorkflowRepositoryImpl) IsLatestCDWfr(pipelineId, wfrId int) (bool, error) { wfr := &CdWorkflowRunner{} - exists, err := 
impl.dbConnection. + ifAnySuccessorWfrExists, err := impl.dbConnection. Model(wfr). Column("cd_workflow_runner.*", "CdWorkflow"). Where("wf.pipeline_id = ?", pipelineId). @@ -489,7 +489,7 @@ func (impl *CdWorkflowRepositoryImpl) IsLatestCDWfr(pipelineId, wfrId int) (bool Join("inner join cd_workflow wf on wf.id = cd_workflow_runner.cd_workflow_id"). Where("cd_workflow_runner.id > ?", wfrId). Exists() - return exists, err + return !ifAnySuccessorWfrExists, err } func (impl *CdWorkflowRepositoryImpl) FindLastPreOrPostTriggeredByEnvironmentId(appId int, environmentId int) (CdWorkflowRunner, error) { diff --git a/internal/sql/repository/pipelineConfig/PipelineRepository.go b/internal/sql/repository/pipelineConfig/PipelineRepository.go index e541ffdb18..83ec16e042 100644 --- a/internal/sql/repository/pipelineConfig/PipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/PipelineRepository.go @@ -73,6 +73,7 @@ type PipelineRepository interface { FindByName(pipelineName string) (pipeline *Pipeline, err error) PipelineExists(pipelineName string) (bool, error) FindById(id int) (pipeline *Pipeline, err error) + FindByIdEvenIfInactive(id int) (pipeline *Pipeline, err error) GetPostStageConfigById(id int) (pipeline *Pipeline, err error) FindAppAndEnvDetailsByPipelineId(id int) (pipeline *Pipeline, err error) FindActiveByEnvIdAndDeploymentType(environmentId int, deploymentAppType string, exclusionList []int, includeApps []int) ([]*Pipeline, error) @@ -302,6 +303,17 @@ func (impl PipelineRepositoryImpl) FindById(id int) (pipeline *Pipeline, err err return pipeline, err } +func (impl PipelineRepositoryImpl) FindByIdEvenIfInactive(id int) (pipeline *Pipeline, err error) { + pipeline = &Pipeline{} + err = impl.dbConnection. + Model(pipeline). + Column("pipeline.*", "App", "Environment"). + Join("inner join app a on pipeline.app_id = a.id"). + Where("pipeline.id = ?", id). + Select() + return pipeline, err +} + func (impl PipelineRepositoryImpl) GetPostStageConfigById(id int) (pipeline *Pipeline, err error) { pipeline = &Pipeline{} err = impl.dbConnection. 
diff --git a/pkg/appStore/bean/bean.go b/pkg/appStore/bean/bean.go index ab6b535c4e..b3715cfebc 100644 --- a/pkg/appStore/bean/bean.go +++ b/pkg/appStore/bean/bean.go @@ -200,11 +200,6 @@ type Dependency struct { Repository string `json:"repository"` } -type DeployPayload struct { - InstalledAppVersionId int - InstalledAppVersionHistoryId int -} - const REFERENCE_TYPE_DEFAULT string = "DEFAULT" const REFERENCE_TYPE_TEMPLATE string = "TEMPLATE" const REFERENCE_TYPE_DEPLOYED string = "DEPLOYED" diff --git a/pkg/appStore/chartGroup/ChartGroupService.go b/pkg/appStore/chartGroup/ChartGroupService.go index 8f6248e605..605d2c9357 100644 --- a/pkg/appStore/chartGroup/ChartGroupService.go +++ b/pkg/appStore/chartGroup/ChartGroupService.go @@ -21,11 +21,8 @@ import ( "bytes" "context" "crypto/sha1" - "encoding/json" "errors" "fmt" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/common-lib/pubsub-lib/model" "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" @@ -38,6 +35,7 @@ import ( repository5 "github.com/devtron-labs/devtron/pkg/cluster/repository" commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out" repository4 "github.com/devtron-labs/devtron/pkg/team" "github.com/devtron-labs/devtron/util/argo" "io/ioutil" @@ -69,7 +67,6 @@ type ChartGroupServiceImpl struct { environmentRepository repository5.EnvironmentRepository teamRepository repository4.TeamRepository appStoreValuesService service.AppStoreValuesService - pubSubClient *pubsub.PubSubClientServiceImpl envService cluster2.EnvironmentService appStoreDeploymentService service2.AppStoreDeploymentService argoUserService argo.ArgoUserService @@ -77,6 +74,7 @@ type ChartGroupServiceImpl struct { acdConfig *argocdServer.ACDConfig fullModeDeploymentService deployment.FullModeDeploymentService gitOperationService git.GitOperationService + appStoreAppsEventPublishService out.AppStoreAppsEventPublishService } func NewChartGroupServiceImpl(logger *zap.SugaredLogger, @@ -90,14 +88,14 @@ func NewChartGroupServiceImpl(logger *zap.SugaredLogger, environmentRepository repository5.EnvironmentRepository, teamRepository repository4.TeamRepository, appStoreValuesService service.AppStoreValuesService, - pubSubClient *pubsub.PubSubClientServiceImpl, envService cluster2.EnvironmentService, appStoreDeploymentService service2.AppStoreDeploymentService, argoUserService argo.ArgoUserService, pipelineStatusTimelineService status.PipelineStatusTimelineService, acdConfig *argocdServer.ACDConfig, fullModeDeploymentService deployment.FullModeDeploymentService, - gitOperationService git.GitOperationService) (*ChartGroupServiceImpl, error) { + gitOperationService git.GitOperationService, + appStoreAppsEventPublishService out.AppStoreAppsEventPublishService) (*ChartGroupServiceImpl, error) { impl := &ChartGroupServiceImpl{ logger: logger, chartGroupEntriesRepository: chartGroupEntriesRepository, @@ -110,7 +108,6 @@ func NewChartGroupServiceImpl(logger *zap.SugaredLogger, environmentRepository: environmentRepository, teamRepository: teamRepository, appStoreValuesService: appStoreValuesService, - pubSubClient: pubSubClient, envService: envService, appStoreDeploymentService: appStoreDeploymentService, argoUserService: argoUserService, @@ -118,12 +115,7 @@ func 
NewChartGroupServiceImpl(logger *zap.SugaredLogger, acdConfig: acdConfig, fullModeDeploymentService: fullModeDeploymentService, gitOperationService: gitOperationService, - } - - err := impl.subscribe() - if err != nil { - impl.logger.Errorw("error in nats subscription", "topic", pubsub.BULK_APPSTORE_DEPLOY_TOPIC, "err", err) - return nil, err + appStoreAppsEventPublishService: appStoreAppsEventPublishService, } return impl, nil } @@ -140,7 +132,9 @@ type ChartGroupService interface { DeployBulk(chartGroupInstallRequest *ChartGroupInstallRequest) (*ChartGroupInstallAppRes, error) DeployDefaultChartOnCluster(bean *cluster2.ClusterBean, userId int32) (bool, error) - TriggerDeploymentEvent(installAppVersions []*appStoreBean.InstallAppVersionDTO) + TriggerDeploymentEventAndHandleStatusUpdate(installAppVersions []*appStoreBean.InstallAppVersionDTO) + + PerformDeployStage(installedAppVersionId int, installedAppVersionHistoryId int, userId int32) (*appStoreBean.InstallAppVersionDTO, error) } type ChartGroupList struct { @@ -606,7 +600,7 @@ func (impl *ChartGroupServiceImpl) DeployBulk(chartGroupInstallRequest *ChartGro return nil, err } //nats event - impl.TriggerDeploymentEvent(installAppVersions) + impl.TriggerDeploymentEventAndHandleStatusUpdate(installAppVersions) // TODO refactoring: why empty obj ?? return &ChartGroupInstallAppRes{}, nil } @@ -671,26 +665,19 @@ func createChartGroupEntryObject(installAppVersionDTO *appStoreBean.InstallAppVe } } -func (impl *ChartGroupServiceImpl) TriggerDeploymentEvent(installAppVersions []*appStoreBean.InstallAppVersionDTO) { - for _, versions := range installAppVersions { +func (impl *ChartGroupServiceImpl) TriggerDeploymentEventAndHandleStatusUpdate(installAppVersions []*appStoreBean.InstallAppVersionDTO) { + publishErrMap := impl.appStoreAppsEventPublishService.PublishBulkDeployEvent(installAppVersions) + for _, version := range installAppVersions { var installedAppDeploymentStatus appStoreBean.AppstoreDeploymentStatus - payload := &appStoreBean.DeployPayload{InstalledAppVersionId: versions.InstalledAppVersionId, InstalledAppVersionHistoryId: versions.InstalledAppVersionHistoryId} - data, err := json.Marshal(payload) - if err != nil { + publishErr, ok := publishErrMap[version.InstalledAppVersionId] + if !ok || publishErr != nil { installedAppDeploymentStatus = appStoreBean.QUE_ERROR } else { - err = impl.pubSubClient.Publish(pubsub.BULK_APPSTORE_DEPLOY_TOPIC, string(data)) - if err != nil { - impl.logger.Errorw("err while publishing msg for app-store bulk deploy", "msg", data, "err", err) - installedAppDeploymentStatus = appStoreBean.QUE_ERROR - } else { - installedAppDeploymentStatus = appStoreBean.ENQUEUED - } - + installedAppDeploymentStatus = appStoreBean.ENQUEUED } - if versions.Status == appStoreBean.DEPLOY_INIT || versions.Status == appStoreBean.QUE_ERROR || versions.Status == appStoreBean.ENQUEUED { + if version.Status == appStoreBean.DEPLOY_INIT || version.Status == appStoreBean.QUE_ERROR || version.Status == appStoreBean.ENQUEUED { impl.logger.Debugw("status for bulk app-store deploy", "status", installedAppDeploymentStatus) - _, err = impl.appStoreDeploymentService.AppStoreDeployOperationStatusUpdate(payload.InstalledAppVersionId, installedAppDeploymentStatus) + _, err := impl.appStoreDeploymentService.AppStoreDeployOperationStatusUpdate(version.InstalledAppVersionId, installedAppDeploymentStatus) if err != nil { impl.logger.Errorw("error while bulk app-store deploy status update", "err", err) } @@ -863,7 +850,7 @@ func (impl 
*ChartGroupServiceImpl) deployDefaultComponent(chartGroupInstallReque //nats event for _, versions := range installAppVersions { - _, err := impl.performDeployStage(versions.InstalledAppVersionId, versions.InstalledAppVersionHistoryId, chartGroupInstallRequest.UserId) + _, err := impl.PerformDeployStage(versions.InstalledAppVersionId, versions.InstalledAppVersionHistoryId, chartGroupInstallRequest.UserId) if err != nil { impl.logger.Errorw("error in performing deploy stage", "deployPayload", versions, "err", err) _, err = impl.appStoreDeploymentService.AppStoreDeployOperationStatusUpdate(versions.InstalledAppVersionId, appStoreBean.QUE_ERROR) @@ -876,41 +863,7 @@ func (impl *ChartGroupServiceImpl) deployDefaultComponent(chartGroupInstallReque return &ChartGroupInstallAppRes{}, nil } -func (impl *ChartGroupServiceImpl) subscribe() error { - callback := func(msg *model.PubSubMsg) { - deployPayload := &appStoreBean.DeployPayload{} - err := json.Unmarshal([]byte(string(msg.Data)), &deployPayload) - if err != nil { - impl.logger.Error("Error while unmarshalling deployPayload json object", "error", err) - return - } - impl.logger.Debugw("deployPayload:", "deployPayload", deployPayload) - //using userId 1 - for system user - _, err = impl.performDeployStage(deployPayload.InstalledAppVersionId, deployPayload.InstalledAppVersionHistoryId, 1) - if err != nil { - impl.logger.Errorw("error in performing deploy stage", "deployPayload", deployPayload, "err", err) - } - } - - // add required logging here - var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { - deployPayload := &appStoreBean.DeployPayload{} - err := json.Unmarshal([]byte(string(msg.Data)), &deployPayload) - if err != nil { - return "error while unmarshalling deployPayload json object", []interface{}{"error", err} - } - return "got message for deploy app-store apps in bulk", []interface{}{"installedAppVersionId", deployPayload.InstalledAppVersionId, "installedAppVersionHistoryId", deployPayload.InstalledAppVersionHistoryId} - } - - err := impl.pubSubClient.Subscribe(pubsub.BULK_APPSTORE_DEPLOY_TOPIC, callback, loggerFunc) - if err != nil { - impl.logger.Error("err", err) - return err - } - return nil -} - -func (impl *ChartGroupServiceImpl) performDeployStage(installedAppVersionId int, installedAppVersionHistoryId int, userId int32) (*appStoreBean.InstallAppVersionDTO, error) { +func (impl *ChartGroupServiceImpl) PerformDeployStage(installedAppVersionId int, installedAppVersionHistoryId int, userId int32) (*appStoreBean.InstallAppVersionDTO, error) { ctx := context.Background() installedAppVersion, err := impl.appStoreDeploymentService.GetInstalledAppVersion(installedAppVersionId, userId) if err != nil { diff --git a/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go b/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go index 9aa5c45fc4..778f2034b6 100644 --- a/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go +++ b/pkg/appStore/installedApp/service/FullMode/InstalledAppDBExtendedService.go @@ -18,13 +18,8 @@ package FullMode import ( - "encoding/json" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/common-lib/pubsub-lib/model" argoApplication "github.com/devtron-labs/devtron/client/argocdServer/bean" - "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" - appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" 
"github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/EAMode" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "time" @@ -46,7 +41,6 @@ type InstalledAppDBExtendedService interface { type InstalledAppDBExtendedServiceImpl struct { *EAMode.InstalledAppDBServiceImpl appStatusService appStatus.AppStatusService - pubSubClient *pubsub.PubSubClientServiceImpl gitOpsConfigReadService config.GitOpsConfigReadService } @@ -56,9 +50,8 @@ func NewInstalledAppDBExtendedServiceImpl(logger *zap.SugaredLogger, userService user.UserService, installedAppRepositoryHistory repository2.InstalledAppVersionHistoryRepository, appStatusService appStatus.AppStatusService, - pubSubClient *pubsub.PubSubClientServiceImpl, - gitOpsConfigReadService config.GitOpsConfigReadService) (*InstalledAppDBExtendedServiceImpl, error) { - impl := &InstalledAppDBExtendedServiceImpl{ + gitOpsConfigReadService config.GitOpsConfigReadService) *InstalledAppDBExtendedServiceImpl { + return &InstalledAppDBExtendedServiceImpl{ InstalledAppDBServiceImpl: &EAMode.InstalledAppDBServiceImpl{ Logger: logger, InstalledAppRepository: installedAppRepository, @@ -67,60 +60,8 @@ func NewInstalledAppDBExtendedServiceImpl(logger *zap.SugaredLogger, InstalledAppRepositoryHistory: installedAppRepositoryHistory, }, appStatusService: appStatusService, - pubSubClient: pubSubClient, gitOpsConfigReadService: gitOpsConfigReadService, } - err := impl.subscribeHelmInstallStatus() - if err != nil { - return nil, err - } - return impl, nil -} - -func (impl *InstalledAppDBExtendedServiceImpl) subscribeHelmInstallStatus() error { - - callback := func(msg *model.PubSubMsg) { - - helmInstallNatsMessage := &appStoreBean.HelmReleaseStatusConfig{} - err := json.Unmarshal([]byte(msg.Data), helmInstallNatsMessage) - if err != nil { - impl.Logger.Errorw("error in unmarshalling helm install status nats message", "err", err) - return - } - - installedAppVersionHistory, err := impl.InstalledAppRepositoryHistory.GetInstalledAppVersionHistory(helmInstallNatsMessage.InstallAppVersionHistoryId) - if err != nil { - impl.Logger.Errorw("error in fetching installed app by installed app id in subscribe helm status callback", "err", err) - return - } - if helmInstallNatsMessage.ErrorInInstallation { - installedAppVersionHistory.Status = pipelineConfig.WorkflowFailed - } else { - installedAppVersionHistory.Status = pipelineConfig.WorkflowSucceeded - } - installedAppVersionHistory.HelmReleaseStatusConfig = msg.Data - _, err = impl.InstalledAppRepositoryHistory.UpdateInstalledAppVersionHistory(installedAppVersionHistory, nil) - if err != nil { - impl.Logger.Errorw("error in updating helm release status data in installedAppVersionHistoryRepository", "err", err) - return - } - } - // add required logging here - var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { - helmInstallNatsMessage := &appStoreBean.HelmReleaseStatusConfig{} - err := json.Unmarshal([]byte(msg.Data), helmInstallNatsMessage) - if err != nil { - return "error in unmarshalling helm install status nats message", []interface{}{"err", err} - } - return "got nats msg for helm chart install status", []interface{}{"InstallAppVersionHistoryId", helmInstallNatsMessage.InstallAppVersionHistoryId, "ErrorInInstallation", helmInstallNatsMessage.ErrorInInstallation, "IsReleaseInstalled", helmInstallNatsMessage.IsReleaseInstalled} - } - - err := impl.pubSubClient.Subscribe(pubsub.HELM_CHART_INSTALL_STATUS_TOPIC, callback, loggerFunc) - if err != nil { - impl.Logger.Error(err) 
- return err - } - return nil } func (impl *InstalledAppDBExtendedServiceImpl) UpdateInstalledAppVersionStatus(application *v1alpha1.Application) (bool, error) { diff --git a/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go b/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go index 6750e9dd84..06c0e94276 100644 --- a/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go +++ b/pkg/appStore/installedApp/service/FullMode/deploymentTypeChange/InstalledAppDeploymentTypeChangeService.go @@ -25,6 +25,7 @@ import ( "github.com/devtron-labs/devtron/pkg/cluster" repository5 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" + bean2 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" "github.com/devtron-labs/devtron/pkg/k8s" util3 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/argo" @@ -109,13 +110,13 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) MigrateDeploymentType(c return response, err } - var deleteDeploymentType bean.DeploymentType + var deleteDeploymentType bean2.DeploymentType var deployStatus appStoreBean.AppstoreDeploymentStatus - if request.DesiredDeploymentType == bean.ArgoCd { - deleteDeploymentType = bean.Helm + if request.DesiredDeploymentType == bean2.ArgoCd { + deleteDeploymentType = bean2.Helm deployStatus = appStoreBean.DEPLOY_INIT } else { - deleteDeploymentType = bean.ArgoCd + deleteDeploymentType = bean2.ArgoCd deployStatus = appStoreBean.DEPLOY_SUCCESS } envBean, err := impl.environmentRepository.FindById(request.EnvId) @@ -147,7 +148,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) MigrateDeploymentType(c if len(installedAppIds) == 0 { return response, &util.ApiError{HttpStatusCode: http.StatusNotFound, UserMessage: fmt.Sprintf("no installed apps found for this desired deployment type %s", request.DesiredDeploymentType)} } - if request.DesiredDeploymentType == bean.Helm { + if request.DesiredDeploymentType == bean2.Helm { //before deleting the installed app we'll first annotate CRD's manifest created by argo-cd with helm supported //annotations so that helm install doesn't throw crd already exist error while migrating from argo-cd to helm. 
for _, installedApp := range installedApps { @@ -190,7 +191,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) MigrateDeploymentType(c return response, nil } -func (impl *InstalledAppDeploymentTypeChangeServiceImpl) performDbOperationsAfterMigrations(desiredDeploymentType bean.DeploymentType, +func (impl *InstalledAppDeploymentTypeChangeServiceImpl) performDbOperationsAfterMigrations(desiredDeploymentType bean2.DeploymentType, successInstalledAppIds []int, successAppIds []*int, userId int32, deployStatus int) error { err := impl.installedAppRepository.UpdateDeploymentAppTypeInInstalledApp(desiredDeploymentType, successInstalledAppIds, userId, deployStatus) @@ -202,7 +203,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) performDbOperationsAfte return err } - if desiredDeploymentType == bean.ArgoCd { + if desiredDeploymentType == bean2.ArgoCd { //this is to handle the case when an external helm app linked to devtron is being //migrated to argo_cd then it's app offering mode should be full mode err = impl.appRepository.UpdateAppOfferingModeForAppIds(successAppIds, util3.SERVER_MODE_FULL, userId) @@ -286,9 +287,9 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) deleteInstalledApps(ctx deploymentAppName := fmt.Sprintf("%s-%s", installedApp.App.AppName, installedApp.Environment.Name) var err error // delete request - if installedApp.DeploymentAppType == bean.ArgoCd { + if installedApp.DeploymentAppType == bean2.ArgoCd { err = impl.fullModeDeploymentService.DeleteACD(deploymentAppName, ctx, false) - } else if installedApp.DeploymentAppType == bean.Helm { + } else if installedApp.DeploymentAppType == bean2.Helm { // For converting from Helm to ArgoCD, GitOps should be configured if gitOpsConfigErr != nil || !gitOpsConfigStatus.IsGitOpsConfigured { err = &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: "200", UserMessage: errors.New("GitOps not configured or unable to fetch GitOps configuration")} @@ -348,7 +349,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) isInstalledAppInfoValid func (impl *InstalledAppDeploymentTypeChangeServiceImpl) handleNotDeployedAppsIfArgoDeploymentType(installedApp *repository2.InstalledApps, failedToDeleteApps []*bean.DeploymentChangeStatus) ([]*bean.DeploymentChangeStatus, error) { - if installedApp.DeploymentAppType == string(bean.ArgoCd) { + if installedApp.DeploymentAppType == string(bean2.ArgoCd) { // check if app status is Healthy status, err := impl.appStatusRepository.Get(installedApp.AppId, installedApp.EnvironmentId) @@ -434,7 +435,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) TriggerAfterMigration(c return response, err } - impl.chartGroupService.TriggerDeploymentEvent(installedAppVersionDTOList) + impl.chartGroupService.TriggerDeploymentEventAndHandleStatusUpdate(installedAppVersionDTOList) err = impl.performDbOperationsAfterTrigger(request.DesiredDeploymentType, successInstalledApps) if err != nil { @@ -450,8 +451,8 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) TriggerAfterMigration(c return response, nil } -func (impl *InstalledAppDeploymentTypeChangeServiceImpl) performDbOperationsAfterTrigger(desiredDeploymentType bean.DeploymentType, successInstalledApps []*repository2.InstalledApps) error { - if desiredDeploymentType == bean.Helm { +func (impl *InstalledAppDeploymentTypeChangeServiceImpl) performDbOperationsAfterTrigger(desiredDeploymentType bean2.DeploymentType, successInstalledApps []*repository2.InstalledApps) error { + if desiredDeploymentType == bean2.Helm { 
err := impl.deleteAppStatusEntryAfterTrigger(successInstalledApps) if err != nil && err == pg.ErrNoRows { impl.logger.Infow("app status already deleted or not found after trigger and migration from argo-cd to helm", @@ -466,7 +467,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) performDbOperationsAfte return nil } -func (impl *InstalledAppDeploymentTypeChangeServiceImpl) getDtoListForTriggerDeploymentEvent(desiredDeploymentType bean.DeploymentType, successInstalledApps []*repository2.InstalledApps) ([]*appStoreBean.InstallAppVersionDTO, error) { +func (impl *InstalledAppDeploymentTypeChangeServiceImpl) getDtoListForTriggerDeploymentEvent(desiredDeploymentType bean2.DeploymentType, successInstalledApps []*repository2.InstalledApps) ([]*appStoreBean.InstallAppVersionDTO, error) { var installedAppVersionDTOList []*appStoreBean.InstallAppVersionDTO for _, installedApp := range successInstalledApps { installedAppVersion, err := impl.installedAppRepository.GetActiveInstalledAppVersionByInstalledAppId(installedApp.Id) @@ -495,8 +496,8 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) getDtoListForTriggerDep return installedAppVersionDTOList, nil } -func (impl *InstalledAppDeploymentTypeChangeServiceImpl) updateDeployedOnDataForTrigger(desiredDeploymentType bean.DeploymentType, installedAppVersion *repository2.InstalledAppVersions, installedAppVersionHistory *repository2.InstalledAppVersionHistory) error { - if desiredDeploymentType == bean.Helm { +func (impl *InstalledAppDeploymentTypeChangeServiceImpl) updateDeployedOnDataForTrigger(desiredDeploymentType bean2.DeploymentType, installedAppVersion *repository2.InstalledAppVersions, installedAppVersionHistory *repository2.InstalledAppVersionHistory) error { + if desiredDeploymentType == bean2.Helm { //for helm, on ui we show last deployed installed app versions table installedAppVersion.UpdatedOn = time.Now() _, err := impl.installedAppRepository.UpdateInstalledAppVersion(installedAppVersion, nil) @@ -506,7 +507,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) updateDeployedOnDataFor "err", err) return err } - } else if desiredDeploymentType == bean.ArgoCd { + } else if desiredDeploymentType == bean2.ArgoCd { //for argo-cd deployments, on ui we show last deployed time from installed app version history table installedAppVersionHistory.StartedOn, installedAppVersionHistory.UpdatedOn = time.Now(), time.Now() @@ -566,7 +567,7 @@ func (impl *InstalledAppDeploymentTypeChangeServiceImpl) fetchDeletedInstalledAp deploymentAppName := fmt.Sprintf("%s-%s", installedApp.App.AppName, installedApp.Environment.Name) var err error - if installedApp.DeploymentAppType == bean.ArgoCd { + if installedApp.DeploymentAppType == bean2.ArgoCd { appIdentifier := &client.AppIdentifier{ ClusterId: installedApp.Environment.ClusterId, ReleaseName: deploymentAppName, diff --git a/pkg/bean/app.go b/pkg/bean/app.go index fc82ed6313..429239bdd4 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -701,27 +701,10 @@ type CdPipelineTrigger struct { PipelineId int `json:"pipelineId"` } -type DeploymentType = string - -const ( - Helm DeploymentType = "helm" - ArgoCd DeploymentType = "argo_cd" - ManifestDownload DeploymentType = "manifest_download" - GitOpsWithoutDeployment DeploymentType = "git_ops_without_deployment" -) - const ( HelmReleaseMetadataAnnotation = `{"metadata": {"annotations": {"meta.helm.sh/release-name": "%s","meta.helm.sh/release-namespace": "%s"},"labels": {"app.kubernetes.io/managed-by": "Helm"}}}` ) -func 
IsAcdApp(deploymentType string) bool { - return deploymentType == ArgoCd -} - -func IsHelmApp(deploymentType string) bool { - return deploymentType == Helm -} - type Status string const ( diff --git a/pkg/bulkAction/BulkUpdateService.go b/pkg/bulkAction/BulkUpdateService.go index 60f766ff28..7fff668c49 100644 --- a/pkg/bulkAction/BulkUpdateService.go +++ b/pkg/bulkAction/BulkUpdateService.go @@ -6,8 +6,6 @@ import ( "encoding/json" "errors" "fmt" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/common-lib/pubsub-lib/model" "github.com/devtron-labs/devtron/api/bean" openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient" client "github.com/devtron-labs/devtron/api/helm-app/service" @@ -28,23 +26,18 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" bean3 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" - "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps" - bean4 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/history" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/variables" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" - "github.com/devtron-labs/devtron/pkg/workflow/cd" - "github.com/devtron-labs/devtron/pkg/workflow/dag" - "github.com/devtron-labs/devtron/util/argo" "github.com/devtron-labs/devtron/util/rbac" jsonpatch "github.com/evanphx/json-patch" "github.com/go-pg/pg" "github.com/tidwall/gjson" "github.com/tidwall/sjson" "go.uber.org/zap" - "k8s.io/utils/pointer" "net/http" "sort" "strings" @@ -76,21 +69,17 @@ type BulkUpdateServiceImpl struct { appRepository app.AppRepository deploymentTemplateHistoryService history.DeploymentTemplateHistoryService configMapHistoryService history.ConfigMapHistoryService - workflowDagExecutor dag.WorkflowDagExecutor pipelineBuilder pipeline.PipelineBuilder enforcerUtil rbac.EnforcerUtil ciHandler pipeline.CiHandler ciPipelineRepository pipelineConfig.CiPipelineRepository appWorkflowRepository appWorkflow.AppWorkflowRepository appWorkflowService appWorkflow2.AppWorkflowService - pubsubClient *pubsub.PubSubClientServiceImpl - argoUserService argo.ArgoUserService scopedVariableManager variables.ScopedVariableManager deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService chartRefService chartRef.ChartRefService - cdTriggerService devtronApps.TriggerService deployedAppService deployedApp.DeployedAppService - cdWorkflowCommonService cd.CdWorkflowCommonService + cdPipelineEventPublishService out.CDPipelineEventPublishService } func NewBulkUpdateServiceImpl(bulkUpdateRepository bulkUpdate.BulkUpdateRepository, @@ -99,22 +88,19 @@ func NewBulkUpdateServiceImpl(bulkUpdateRepository bulkUpdate.BulkUpdateReposito pipelineRepository pipelineConfig.PipelineRepository, appRepository app.AppRepository, deploymentTemplateHistoryService history.DeploymentTemplateHistoryService, - configMapHistoryService history.ConfigMapHistoryService, workflowDagExecutor dag.WorkflowDagExecutor, + configMapHistoryService history.ConfigMapHistoryService, pipelineBuilder pipeline.PipelineBuilder, enforcerUtil rbac.EnforcerUtil, ciHandler pipeline.CiHandler, ciPipelineRepository 
pipelineConfig.CiPipelineRepository, appWorkflowRepository appWorkflow.AppWorkflowRepository, appWorkflowService appWorkflow2.AppWorkflowService, - pubsubClient *pubsub.PubSubClientServiceImpl, - argoUserService argo.ArgoUserService, scopedVariableManager variables.ScopedVariableManager, deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, chartRefService chartRef.ChartRefService, - cdTriggerService devtronApps.TriggerService, deployedAppService deployedApp.DeployedAppService, - cdWorkflowCommonService cd.CdWorkflowCommonService) (*BulkUpdateServiceImpl, error) { - impl := &BulkUpdateServiceImpl{ + cdPipelineEventPublishService out.CDPipelineEventPublishService) *BulkUpdateServiceImpl { + return &BulkUpdateServiceImpl{ bulkUpdateRepository: bulkUpdateRepository, logger: logger, environmentRepository: environmentRepository, @@ -122,25 +108,19 @@ func NewBulkUpdateServiceImpl(bulkUpdateRepository bulkUpdate.BulkUpdateReposito appRepository: appRepository, deploymentTemplateHistoryService: deploymentTemplateHistoryService, configMapHistoryService: configMapHistoryService, - workflowDagExecutor: workflowDagExecutor, pipelineBuilder: pipelineBuilder, enforcerUtil: enforcerUtil, ciHandler: ciHandler, ciPipelineRepository: ciPipelineRepository, appWorkflowRepository: appWorkflowRepository, appWorkflowService: appWorkflowService, - pubsubClient: pubsubClient, - argoUserService: argoUserService, scopedVariableManager: scopedVariableManager, deployedAppMetricsService: deployedAppMetricsService, chartRefService: chartRefService, - cdTriggerService: cdTriggerService, deployedAppService: deployedAppService, - cdWorkflowCommonService: cdWorkflowCommonService, + cdPipelineEventPublishService: cdPipelineEventPublishService, } - err := impl.SubscribeToCdBulkTriggerTopic() - return impl, err } const ( @@ -1337,43 +1317,14 @@ func (impl BulkUpdateServiceImpl) BulkDeploy(request *BulkApplicationForEnvironm continue } artifact := artifacts[0] - overrideRequest := &bean.ValuesOverrideRequest{ - PipelineId: pipeline.Id, - AppId: pipeline.AppId, - CiArtifactId: artifact.Id, - UserId: request.UserId, - CdWorkflowType: bean.CD_WORKFLOW_TYPE_DEPLOY, - } - event := &bean.BulkCdDeployEvent{ - ValuesOverrideRequest: overrideRequest, - UserId: overrideRequest.UserId, - } - - payload, err := json.Marshal(event) - if err != nil { - impl.logger.Errorw("failed to marshal cd bulk deploy event request", - "request", event, - "err", err) - - pipelineResponse := response[appKey] - pipelineResponse[pipelineKey] = false - response[appKey] = pipelineResponse - continue - } - - err = impl.pubsubClient.Publish(pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, string(payload)) + err = impl.cdPipelineEventPublishService.PublishBulkTriggerTopicEvent(pipeline.Id, pipeline.AppId, artifact.CiPipelineId, request.UserId) if err != nil { - impl.logger.Errorw("failed to publish trigger request event", - "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, - "request", overrideRequest, - "err", err) - + impl.logger.Errorw("error, PublishBulkTriggerTopicEvent", "err", err, "pipeline", pipeline) pipelineResponse := response[appKey] pipelineResponse[pipelineKey] = false response[appKey] = pipelineResponse continue } - pipelineResponse := response[appKey] pipelineResponse[pipelineKey] = success response[appKey] = pipelineResponse @@ -1384,76 +1335,6 @@ func (impl BulkUpdateServiceImpl) BulkDeploy(request *BulkApplicationForEnvironm return bulkOperationResponse, nil } -func (impl BulkUpdateServiceImpl) SubscribeToCdBulkTriggerTopic() error { - - 
callback := func(msg *model.PubSubMsg) { - - event := &bean.BulkCdDeployEvent{} - err := json.Unmarshal([]byte(msg.Data), event) - if err != nil { - impl.logger.Errorw("Error unmarshalling received event", - "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, - "msg", msg.Data, - "err", err) - return - } - event.ValuesOverrideRequest.UserId = event.UserId - - // trigger - ctx, err := impl.buildACDContext() - if err != nil { - impl.logger.Errorw("error in creating acd context", - "err", err) - return - } - - triggerContext := bean4.TriggerContext{ - ReferenceId: pointer.String(msg.MsgId), - Context: ctx, - } - - _, err = impl.cdTriggerService.ManualCdTrigger(triggerContext, event.ValuesOverrideRequest) - if err != nil { - impl.logger.Errorw("Error triggering CD", - "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, - "msg", msg.Data, - "err", err) - } - } - - // add required logging here - var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { - event := &bean.BulkCdDeployEvent{} - err := json.Unmarshal([]byte(msg.Data), event) - if err != nil { - return "error unmarshalling received event", []interface{}{"msg", msg.Data, "err", err} - } - return "got message for trigger cd in bulk", []interface{}{"pipelineId", event.ValuesOverrideRequest.PipelineId, "appId", event.ValuesOverrideRequest.AppId, "cdWorkflowType", event.ValuesOverrideRequest.CdWorkflowType, "ciArtifactId", event.ValuesOverrideRequest.CiArtifactId} - } - - validations := impl.cdWorkflowCommonService.GetTriggerValidateFuncs() - err := impl.pubsubClient.Subscribe(pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, callback, loggerFunc, validations...) - if err != nil { - impl.logger.Error("failed to subscribe to NATS topic", - "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, - "err", err) - return err - } - return nil -} - -func (impl *BulkUpdateServiceImpl) buildACDContext() (acdContext context.Context, err error) { - //this part only accessible for acd apps hibernation, if acd configured it will fetch latest acdToken, else it will return error - acdToken, err := impl.argoUserService.GetLatestDevtronArgoCdUserToken() - if err != nil { - impl.logger.Errorw("error in getting acd token", "err", err) - return nil, err - } - ctx := context.Background() - ctx = context.WithValue(ctx, "token", acdToken) - return ctx, nil -} - func (impl BulkUpdateServiceImpl) BulkBuildTrigger(request *BulkApplicationForEnvironmentPayload, ctx context.Context, w http.ResponseWriter, token string, checkAuthForBulkActions func(token string, appObject string, envObject string) bool) (*BulkApplicationForEnvironmentResponse, error) { var pipelines []*pipelineConfig.Pipeline var err error diff --git a/pkg/deployment/manifest/ManifestCreationService.go b/pkg/deployment/manifest/ManifestCreationService.go index 1134d8dc7f..5b83d35bc7 100644 --- a/pkg/deployment/manifest/ManifestCreationService.go +++ b/pkg/deployment/manifest/ManifestCreationService.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" application3 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" - "github.com/aws/aws-sdk-go/service/autoscaling" util5 "github.com/devtron-labs/common-lib/utils/k8s" "github.com/devtron-labs/devtron/api/bean" application2 "github.com/devtron-labs/devtron/client/argocdServer/application" @@ -23,22 +22,19 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" + 
"github.com/devtron-labs/devtron/pkg/deployment/manifest/helper" "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/imageDigestPolicy" "github.com/devtron-labs/devtron/pkg/k8s" repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" - "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/sql" - util3 "github.com/devtron-labs/devtron/pkg/util" "github.com/devtron-labs/devtron/pkg/variables" "github.com/devtron-labs/devtron/pkg/variables/parsers" repository5 "github.com/devtron-labs/devtron/pkg/variables/repository" util4 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" errors2 "github.com/juju/errors" - "github.com/pkg/errors" "github.com/tidwall/gjson" - "github.com/tidwall/sjson" "go.opentelemetry.io/otel" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime/schema" @@ -147,12 +143,7 @@ func (impl *ManifestCreationServiceImpl) BuildManifestForTrigger(overrideRequest } func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*app.ValuesOverrideResponse, error) { - if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { - overrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY - } - if len(overrideRequest.DeploymentWithConfig) == 0 { - overrideRequest.DeploymentWithConfig = bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED - } + helper.ResolveDeploymentTypeAndUpdate(overrideRequest) valuesOverrideResponse := &app.ValuesOverrideResponse{} isPipelineOverrideCreated := overrideRequest.PipelineOverrideId > 0 pipeline, err := impl.pipelineRepository.FindById(overrideRequest.PipelineId) @@ -225,8 +216,8 @@ func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideReq if !isPipelineOverrideCreated { chartVersion := envOverride.Chart.ChartVersion _, span = otel.Tracer("orchestrator").Start(ctx, "getConfigMapAndSecretJsonV2") - scope := getScopeForVariables(overrideRequest, envOverride) - request := createConfigMapAndSecretJsonRequest(overrideRequest, envOverride, chartVersion, scope) + scope := helper.GetScopeForVariables(overrideRequest, envOverride) + request := helper.CreateConfigMapAndSecretJsonRequest(overrideRequest, envOverride, chartVersion, scope) configMapJson, err = impl.getConfigMapAndSecretJsonV2(request, envOverride) span.End() @@ -269,7 +260,6 @@ func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideReq } func (impl *ManifestCreationServiceImpl) getDeploymentStrategyByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (*chartConfig.PipelineStrategy, error) { - strategy := &chartConfig.PipelineStrategy{} var err error if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { @@ -284,31 +274,15 @@ func (impl *ManifestCreationServiceImpl) getDeploymentStrategyByTriggerType(over strategy.Config = strategyHistory.Config strategy.PipelineId = overrideRequest.PipelineId } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - if overrideRequest.ForceTrigger { + deploymentTemplateType := helper.GetDeploymentTemplateType(overrideRequest) + if overrideRequest.ForceTrigger || len(deploymentTemplateType) == 0 { _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) span.End() } else { - 
var deploymentTemplate chartRepoRepository.DeploymentStrategy - if overrideRequest.DeploymentTemplate == "ROLLING" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_ROLLING - } else if overrideRequest.DeploymentTemplate == "BLUE-GREEN" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_BLUE_GREEN - } else if overrideRequest.DeploymentTemplate == "CANARY" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_CANARY - } else if overrideRequest.DeploymentTemplate == "RECREATE" { - deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_RECREATE - } - - if len(deploymentTemplate) > 0 { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.FindByStrategyAndPipelineId") - strategy, err = impl.pipelineConfigRepository.FindByStrategyAndPipelineId(deploymentTemplate, overrideRequest.PipelineId) - span.End() - } else { - _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.GetDefaultStrategyByPipelineId") - strategy, err = impl.pipelineConfigRepository.GetDefaultStrategyByPipelineId(overrideRequest.PipelineId) - span.End() - } + _, span := otel.Tracer("orchestrator").Start(ctx, "pipelineConfigRepository.FindByStrategyAndPipelineId") + strategy, err = impl.pipelineConfigRepository.FindByStrategyAndPipelineId(deploymentTemplateType, overrideRequest.PipelineId) + span.End() } if err != nil && errors2.IsNotFound(err) == false { impl.logger.Errorf("invalid state", "err", err, "req", strategy) @@ -319,168 +293,185 @@ func (impl *ManifestCreationServiceImpl) getDeploymentStrategyByTriggerType(over } func (impl *ManifestCreationServiceImpl) getEnvOverrideByTriggerType(overrideRequest *bean.ValuesOverrideRequest, triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) { - envOverride := &chartConfig.EnvConfigOverride{} - var err error if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { - _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") - deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - //VARIABLE_SNAPSHOT_GET and resolve - - span.End() - if err != nil { - impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) - return nil, err - } - templateName := deploymentTemplateHistory.TemplateName - templateVersion := deploymentTemplateHistory.TemplateVersion - if templateName == "Rollout Deployment" { - templateName = "" - } - //getting chart_ref by id - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRefRepository.FindByVersionAndName") - chartRefDto, err := impl.chartRefService.FindByVersionAndName(templateVersion, templateName) - span.End() + envOverride, err = impl.getEnvOverrideForSpecificConfigTrigger(overrideRequest, ctx) if err != nil { - impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", templateVersion, "name", templateName) + impl.logger.Errorw("error, getEnvOverrideForSpecificConfigTrigger", "err", err, "overrideRequest", overrideRequest) return nil, err } - //assuming that if a chartVersion is deployed then it's envConfigOverride will be available - _, span = otel.Tracer("orchestrator").Start(ctx, 
"environmentConfigRepository.GetByAppIdEnvIdAndChartRefId") - envOverride, err = impl.environmentConfigRepository.GetByAppIdEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chartRefDto.Id) - span.End() + } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { + envOverride, err = impl.getEnvOverrideForLastSavedConfigTrigger(overrideRequest, triggeredAt, ctx) if err != nil { - impl.logger.Errorw("error in getting envConfigOverride for pipeline for specific chartVersion", "err", err, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "chartRefId", chartRefDto.Id) + impl.logger.Errorw("error, getEnvOverrideForLastSavedConfigTrigger", "err", err, "overrideRequest", overrideRequest) return nil, err } + } + return envOverride, nil +} - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) +func (impl *ManifestCreationServiceImpl) getEnvOverrideForSpecificConfigTrigger(overrideRequest *bean.ValuesOverrideRequest, + ctx context.Context) (*chartConfig.EnvConfigOverride, error) { + envOverride := &chartConfig.EnvConfigOverride{} + var err error + _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") + deploymentTemplateHistory, err := impl.deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId(overrideRequest.PipelineId, overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + //VARIABLE_SNAPSHOT_GET and resolve + span.End() + if err != nil { + impl.logger.Errorw("error in getting deployed deployment template history by pipelineId and wfrId", "err", err, "pipelineId", &overrideRequest, "wfrId", overrideRequest.WfrIdForDeploymentWithSpecificTrigger) + return nil, err + } + templateName := deploymentTemplateHistory.TemplateName + templateVersion := deploymentTemplateHistory.TemplateVersion + if templateName == "Rollout Deployment" { + templateName = "" + } + //getting chart_ref by id + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRefRepository.FindByVersionAndName") + chartRefDto, err := impl.chartRefService.FindByVersionAndName(templateVersion, templateName) + span.End() + if err != nil { + impl.logger.Errorw("error in getting chartRef by version and name", "err", err, "version", templateVersion, "name", templateName) + return nil, err + } + //assuming that if a chartVersion is deployed then it's envConfigOverride will be available + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.GetByAppIdEnvIdAndChartRefId") + envOverride, err = impl.environmentConfigRepository.GetByAppIdEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chartRefDto.Id) + span.End() + if err != nil { + impl.logger.Errorw("error in getting envConfigOverride for pipeline for specific chartVersion", "err", err, "appId", overrideRequest.AppId, "envId", overrideRequest.EnvId, "chartRefId", chartRefDto.Id) + return nil, err + } + + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + span.End() + if err != nil { + impl.logger.Errorw("unable to find env", "err", err, "env", envOverride.TargetEnvironment) + return nil, err + } + envOverride.Environment = env + //updating historical data in envConfigOverride and appMetrics flag + envOverride.IsOverride = true + envOverride.EnvOverrideValues = deploymentTemplateHistory.Template + reference := 
repository5.HistoryReference{ + HistoryReferenceId: deploymentTemplateHistory.Id, + HistoryReferenceType: repository5.HistoryReferenceTypeDeploymentTemplate, + } + variableMap, resolvedTemplate, err := impl.scopedVariableManager.GetVariableSnapshotAndResolveTemplate(envOverride.EnvOverrideValues, parsers.JsonVariableTemplate, reference, true, false) + envOverride.ResolvedEnvOverrideValues = resolvedTemplate + envOverride.VariableSnapshot = variableMap + if err != nil { + impl.logger.Errorw("error, GetVariableSnapshotAndResolveTemplate", "err", err, "envOverride", envOverride) + return envOverride, err + } + return envOverride, nil +} + +func (impl *ManifestCreationServiceImpl) getEnvOverrideForLastSavedConfigTrigger(overrideRequest *bean.ValuesOverrideRequest, + triggeredAt time.Time, ctx context.Context) (*chartConfig.EnvConfigOverride, error) { + envOverride := &chartConfig.EnvConfigOverride{} + var err error + _, span := otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.ActiveEnvConfigOverride") + envOverride, err = impl.environmentConfigRepository.ActiveEnvConfigOverride(overrideRequest.AppId, overrideRequest.EnvId) + var chart *chartRepoRepository.Chart + span.End() + if err != nil { + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + return nil, err + } + if envOverride.Id == 0 { + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") + chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) span.End() if err != nil { - impl.logger.Errorw("unable to find env", "err", err) + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) return nil, err } - envOverride.Environment = env - - //updating historical data in envConfigOverride and appMetrics flag - envOverride.IsOverride = true - envOverride.EnvOverrideValues = deploymentTemplateHistory.Template - reference := repository5.HistoryReference{ - HistoryReferenceId: deploymentTemplateHistory.Id, - HistoryReferenceType: repository5.HistoryReferenceTypeDeploymentTemplate, - } - variableMap, resolvedTemplate, err := impl.scopedVariableManager.GetVariableSnapshotAndResolveTemplate(envOverride.EnvOverrideValues, parsers.JsonVariableTemplate, reference, true, false) - envOverride.ResolvedEnvOverrideValues = resolvedTemplate - envOverride.VariableSnapshot = variableMap - if err != nil { - return envOverride, err - } - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { - _, span := otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.ActiveEnvConfigOverride") - envOverride, err = impl.environmentConfigRepository.ActiveEnvConfigOverride(overrideRequest.AppId, overrideRequest.EnvId) - - var chart *chartRepoRepository.Chart + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId") + envOverride, err = impl.environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chart.ChartRefId) span.End() - if err != nil { + if err != nil && !errors2.IsNotFound(err) { impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) return nil, err } - if envOverride.Id == 0 { - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") - chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) + + //creating new env override config + if errors2.IsNotFound(err) || envOverride == nil { + _, 
span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + environment, err := impl.envRepository.FindById(overrideRequest.EnvId) span.End() - if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + if err != nil && !util.IsErrNoRows(err) { return nil, err } - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId") - envOverride, err = impl.environmentConfigRepository.FindChartByAppIdAndEnvIdAndChartRefId(overrideRequest.AppId, overrideRequest.EnvId, chart.ChartRefId) - span.End() - if err != nil && !errors2.IsNotFound(err) { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) - return nil, err + envOverride = &chartConfig.EnvConfigOverride{ + Active: true, + ManualReviewed: true, + Status: models.CHARTSTATUS_SUCCESS, + TargetEnvironment: overrideRequest.EnvId, + ChartId: chart.Id, + AuditLog: sql.AuditLog{UpdatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId}, + Namespace: environment.Namespace, + IsOverride: false, + EnvOverrideValues: "{}", + Latest: false, + IsBasicViewLocked: chart.IsBasicViewLocked, + CurrentViewEditor: chart.CurrentViewEditor, } - - //creating new env override config - if errors2.IsNotFound(err) || envOverride == nil { - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - environment, err := impl.envRepository.FindById(overrideRequest.EnvId) - span.End() - if err != nil && !util.IsErrNoRows(err) { - return nil, err - } - envOverride = &chartConfig.EnvConfigOverride{ - Active: true, - ManualReviewed: true, - Status: models.CHARTSTATUS_SUCCESS, - TargetEnvironment: overrideRequest.EnvId, - ChartId: chart.Id, - AuditLog: sql.AuditLog{UpdatedBy: overrideRequest.UserId, UpdatedOn: triggeredAt, CreatedOn: triggeredAt, CreatedBy: overrideRequest.UserId}, - Namespace: environment.Namespace, - IsOverride: false, - EnvOverrideValues: "{}", - Latest: false, - IsBasicViewLocked: chart.IsBasicViewLocked, - CurrentViewEditor: chart.CurrentViewEditor, - } - _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.Save") - err = impl.environmentConfigRepository.Save(envOverride) - span.End() - if err != nil { - impl.logger.Errorw("error in creating envConfig", "data", envOverride, "error", err) - return nil, err - } - } - envOverride.Chart = chart - } else if envOverride.Id > 0 && !envOverride.IsOverride { - _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") - chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) + _, span = otel.Tracer("orchestrator").Start(ctx, "environmentConfigRepository.Save") + err = impl.environmentConfigRepository.Save(envOverride) span.End() if err != nil { - impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) + impl.logger.Errorw("error in creating envConfig", "data", envOverride, "error", err) return nil, err } - envOverride.Chart = chart } - - _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + envOverride.Chart = chart + } else if envOverride.Id > 0 && !envOverride.IsOverride { + _, span = otel.Tracer("orchestrator").Start(ctx, "chartRepository.FindLatestChartForAppByAppId") + chart, err = impl.chartRepository.FindLatestChartForAppByAppId(overrideRequest.AppId) span.End() if err != nil { - impl.logger.Errorw("unable to 
find env", "err", err) + impl.logger.Errorw("invalid state", "err", err, "req", overrideRequest) return nil, err } - envOverride.Environment = env - scope := getScopeForVariables(overrideRequest, envOverride) - if envOverride.IsOverride { - - entity := repository5.GetEntity(envOverride.Id, repository5.EntityTypeDeploymentTemplateEnvLevel) - resolvedTemplate, variableMap, err := impl.scopedVariableManager.GetMappedVariablesAndResolveTemplate(envOverride.EnvOverrideValues, scope, entity, true) - envOverride.ResolvedEnvOverrideValues = resolvedTemplate - envOverride.VariableSnapshot = variableMap - if err != nil { - return envOverride, err - } - - } else { - entity := repository5.GetEntity(chart.Id, repository5.EntityTypeDeploymentTemplateAppLevel) - resolvedTemplate, variableMap, err := impl.scopedVariableManager.GetMappedVariablesAndResolveTemplate(chart.GlobalOverride, scope, entity, true) - envOverride.Chart.ResolvedGlobalOverride = resolvedTemplate - envOverride.VariableSnapshot = variableMap - if err != nil { - return envOverride, err - } + envOverride.Chart = chart + } + _, span = otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err := impl.envRepository.FindById(envOverride.TargetEnvironment) + span.End() + if err != nil { + impl.logger.Errorw("unable to find env", "err", err) + return nil, err + } + envOverride.Environment = env + scope := helper.GetScopeForVariables(overrideRequest, envOverride) + if envOverride.IsOverride { + entity := repository5.GetEntity(envOverride.Id, repository5.EntityTypeDeploymentTemplateEnvLevel) + resolvedTemplate, variableMap, err := impl.scopedVariableManager.GetMappedVariablesAndResolveTemplate(envOverride.EnvOverrideValues, scope, entity, true) + envOverride.ResolvedEnvOverrideValues = resolvedTemplate + envOverride.VariableSnapshot = variableMap + if err != nil { + impl.logger.Errorw("error, GetMappedVariablesAndResolveTemplate env override level template", "err", err, "envOverride", envOverride) + return envOverride, err + } + } else { + entity := repository5.GetEntity(chart.Id, repository5.EntityTypeDeploymentTemplateAppLevel) + resolvedTemplate, variableMap, err := impl.scopedVariableManager.GetMappedVariablesAndResolveTemplate(chart.GlobalOverride, scope, entity, true) + envOverride.Chart.ResolvedGlobalOverride = resolvedTemplate + envOverride.VariableSnapshot = variableMap + if err != nil { + impl.logger.Errorw("error, GetMappedVariablesAndResolveTemplate app level template", "err", err, "chart", chart) + return envOverride, err } } - return envOverride, nil } func (impl *ManifestCreationServiceImpl) getAppMetricsByTriggerType(overrideRequest *bean.ValuesOverrideRequest, ctx context.Context) (bool, error) { - var appMetrics bool if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_SPECIFIC_TRIGGER { _, span := otel.Tracer("orchestrator").Start(ctx, "deploymentTemplateHistoryRepository.GetHistoryByPipelineIdAndWfrId") @@ -491,7 +482,6 @@ func (impl *ManifestCreationServiceImpl) getAppMetricsByTriggerType(overrideRequ return appMetrics, err } appMetrics = deploymentTemplateHistory.IsAppMetricsEnabled - } else if overrideRequest.DeploymentWithConfig == bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED { _, span := otel.Tracer("orchestrator").Start(ctx, "deployedAppMetricsService.GetMetricsFlagForAPipelineByAppIdAndEnvId") isAppMetricsEnabled, err := impl.deployedAppMetricsService.GetMetricsFlagForAPipelineByAppIdAndEnvId(overrideRequest.AppId, overrideRequest.EnvId) @@ -505,27 +495,22 @@ func (impl 
*ManifestCreationServiceImpl) getAppMetricsByTriggerType(overrideRequ return appMetrics, nil } -func (impl *ManifestCreationServiceImpl) mergeOverrideValues(envOverride *chartConfig.EnvConfigOverride, - releaseOverrideJson string, - configMapJson []byte, - appLabelJsonByte []byte, - strategy *chartConfig.PipelineStrategy, -) (mergedValues []byte, err error) { - +func (impl *ManifestCreationServiceImpl) mergeOverrideValues(envOverride *chartConfig.EnvConfigOverride, releaseOverrideJson string, + configMapJson []byte, appLabelJsonByte []byte, strategy *chartConfig.PipelineStrategy) (mergedValues []byte, err error) { //merge three values on the fly //ordering is important here //global < environment < db< release var merged []byte + var templateOverrideValuesByte []byte if !envOverride.IsOverride { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.Chart.ResolvedGlobalOverride)) - if err != nil { - return nil, err - } + templateOverrideValuesByte = []byte(envOverride.Chart.ResolvedGlobalOverride) } else { - merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), []byte(envOverride.ResolvedEnvOverrideValues)) - if err != nil { - return nil, err - } + templateOverrideValuesByte = []byte(envOverride.ResolvedEnvOverrideValues) + } + merged, err = impl.mergeUtil.JsonPatch([]byte("{}"), templateOverrideValuesByte) + if err != nil { + impl.logger.Errorw("error in merging deployment template override values", "err", err, "overrideValues", templateOverrideValuesByte) + return nil, err } if strategy != nil && len(strategy.Config) > 0 { merged, err = impl.mergeUtil.JsonPatch(merged, []byte(strategy.Config)) @@ -792,7 +777,7 @@ func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx conte return merged } - hpaResourceRequest := getAutoScalingReplicaCount(templateMap, appName) + hpaResourceRequest := helper.GetAutoScalingReplicaCount(templateMap, appName) impl.logger.Debugw("autoscalingCheckBeforeTrigger", "hpaResourceRequest", hpaResourceRequest) if hpaResourceRequest.IsEnable { resourceManifest := make(map[string]interface{}) @@ -839,7 +824,7 @@ func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx conte return merged } - reqReplicaCount := fetchRequiredReplicaCount(currentReplicaCount, hpaResourceRequest.ReqMaxReplicas, hpaResourceRequest.ReqMinReplicas) + reqReplicaCount := helper.FetchRequiredReplicaCount(currentReplicaCount, hpaResourceRequest.ReqMaxReplicas, hpaResourceRequest.ReqMinReplicas) templateMap["replicaCount"] = reqReplicaCount merged, err = json.Marshal(&templateMap) if err != nil { @@ -854,12 +839,12 @@ func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx conte //check for custom chart support if autoscalingEnabledPath, ok := templateMap[bean2.CustomAutoScalingEnabledPathKey]; ok { if deploymentType == models.DEPLOYMENTTYPE_STOP { - merged, err = setScalingValues(templateMap, bean2.CustomAutoScalingEnabledPathKey, merged, false) + merged, err = helper.SetScalingValues(templateMap, bean2.CustomAutoScalingEnabledPathKey, merged, false) if err != nil { impl.logger.Errorw("error occurred while setting autoscaling key", "templateMap", templateMap, "err", err) return merged } - merged, err = setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, 0) + merged, err = helper.SetScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, 0) if err != nil { impl.logger.Errorw("error occurred while setting autoscaling key", "templateMap", templateMap, "err", err) return 
merged @@ -876,7 +861,7 @@ func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx conte if err != nil { return merged } - merged, err = setScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, replicaCount) + merged, err = helper.SetScalingValues(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged, replicaCount) if err != nil { impl.logger.Errorw("error occurred while setting autoscaling key", "templateMap", templateMap, "err", err) return merged @@ -889,135 +874,20 @@ func (impl *ManifestCreationServiceImpl) autoscalingCheckBeforeTrigger(ctx conte } func (impl *ManifestCreationServiceImpl) getReplicaCountFromCustomChart(templateMap map[string]interface{}, merged []byte) (float64, error) { - autoscalingMinVal, err := extractParamValue(templateMap, bean2.CustomAutoscalingMinPathKey, merged) + autoscalingMinVal, err := helper.ExtractParamValue(templateMap, bean2.CustomAutoscalingMinPathKey, merged) if err != nil { impl.logger.Errorw("error occurred while parsing float number", "key", bean2.CustomAutoscalingMinPathKey, "err", err) return 0, err } - autoscalingMaxVal, err := extractParamValue(templateMap, bean2.CustomAutoscalingMaxPathKey, merged) + autoscalingMaxVal, err := helper.ExtractParamValue(templateMap, bean2.CustomAutoscalingMaxPathKey, merged) if err != nil { impl.logger.Errorw("error occurred while parsing float number", "key", bean2.CustomAutoscalingMaxPathKey, "err", err) return 0, err } - autoscalingReplicaCountVal, err := extractParamValue(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged) + autoscalingReplicaCountVal, err := helper.ExtractParamValue(templateMap, bean2.CustomAutoscalingReplicaCountPathKey, merged) if err != nil { impl.logger.Errorw("error occurred while parsing float number", "key", bean2.CustomAutoscalingReplicaCountPathKey, "err", err) return 0, err } - return fetchRequiredReplicaCount(autoscalingReplicaCountVal, autoscalingMaxVal, autoscalingMinVal), nil -} - -func extractParamValue(inputMap map[string]interface{}, key string, merged []byte) (float64, error) { - if _, ok := inputMap[key]; !ok { - return 0, errors.New("empty-val-err") - } - return util4.ParseFloatNumber(gjson.Get(string(merged), inputMap[key].(string)).Value()) -} - -func setScalingValues(templateMap map[string]interface{}, customScalingKey string, merged []byte, value interface{}) ([]byte, error) { - autoscalingJsonPath := templateMap[customScalingKey] - autoscalingJsonPathKey := autoscalingJsonPath.(string) - mergedRes, err := sjson.Set(string(merged), autoscalingJsonPathKey, value) - if err != nil { - return []byte{}, err - } - return []byte(mergedRes), nil -} - -func fetchRequiredReplicaCount(currentReplicaCount float64, reqMaxReplicas float64, reqMinReplicas float64) float64 { - var reqReplicaCount float64 - if currentReplicaCount <= reqMaxReplicas && currentReplicaCount >= reqMinReplicas { - reqReplicaCount = currentReplicaCount - } else if currentReplicaCount > reqMaxReplicas { - reqReplicaCount = reqMaxReplicas - } else if currentReplicaCount < reqMinReplicas { - reqReplicaCount = reqMinReplicas - } - return reqReplicaCount -} - -func getAutoScalingReplicaCount(templateMap map[string]interface{}, appName string) *util4.HpaResourceRequest { - hasOverride := false - if _, ok := templateMap[bean3.FullnameOverride]; ok { - appNameOverride := templateMap[bean3.FullnameOverride].(string) - if len(appNameOverride) > 0 { - appName = appNameOverride - hasOverride = true - } - } - if !hasOverride { - if _, ok := 
templateMap[bean3.NameOverride]; ok { - nameOverride := templateMap[bean3.NameOverride].(string) - if len(nameOverride) > 0 { - appName = fmt.Sprintf("%s-%s", appName, nameOverride) - } - } - } - hpaResourceRequest := &util4.HpaResourceRequest{} - hpaResourceRequest.Version = "" - hpaResourceRequest.Group = autoscaling.ServiceName - hpaResourceRequest.Kind = bean3.HorizontalPodAutoscaler - if _, ok := templateMap[bean3.KedaAutoscaling]; ok { - as := templateMap[bean3.KedaAutoscaling] - asd := as.(map[string]interface{}) - if _, ok := asd[bean3.Enabled]; ok { - enable := asd[bean3.Enabled].(bool) - if enable { - hpaResourceRequest.IsEnable = enable - hpaResourceRequest.ReqReplicaCount = templateMap[bean3.ReplicaCount].(float64) - hpaResourceRequest.ReqMaxReplicas = asd["maxReplicaCount"].(float64) - hpaResourceRequest.ReqMinReplicas = asd["minReplicaCount"].(float64) - hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s-%s", "keda-hpa", appName, "keda") - return hpaResourceRequest - } - } - } - - if _, ok := templateMap[autoscaling.ServiceName]; ok { - as := templateMap[autoscaling.ServiceName] - asd := as.(map[string]interface{}) - if _, ok := asd[bean3.Enabled]; ok { - enable := asd[bean3.Enabled].(bool) - if enable { - hpaResourceRequest.IsEnable = asd[bean3.Enabled].(bool) - hpaResourceRequest.ReqReplicaCount = templateMap[bean3.ReplicaCount].(float64) - hpaResourceRequest.ReqMaxReplicas = asd["MaxReplicas"].(float64) - hpaResourceRequest.ReqMinReplicas = asd["MinReplicas"].(float64) - hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s", appName, "hpa") - return hpaResourceRequest - } - } - } - return hpaResourceRequest - -} - -func createConfigMapAndSecretJsonRequest(overrideRequest *bean.ValuesOverrideRequest, envOverride *chartConfig.EnvConfigOverride, chartVersion string, scope resourceQualifiers.Scope) bean3.ConfigMapAndSecretJsonV2 { - request := bean3.ConfigMapAndSecretJsonV2{ - AppId: overrideRequest.AppId, - EnvId: envOverride.TargetEnvironment, - PipeLineId: overrideRequest.PipelineId, - ChartVersion: chartVersion, - DeploymentWithConfig: overrideRequest.DeploymentWithConfig, - WfrIdForDeploymentWithSpecificTrigger: overrideRequest.WfrIdForDeploymentWithSpecificTrigger, - Scope: scope, - } - return request -} - -func getScopeForVariables(overrideRequest *bean.ValuesOverrideRequest, envOverride *chartConfig.EnvConfigOverride) resourceQualifiers.Scope { - scope := resourceQualifiers.Scope{ - AppId: overrideRequest.AppId, - EnvId: envOverride.TargetEnvironment, - ClusterId: envOverride.Environment.Id, - SystemMetadata: &resourceQualifiers.SystemMetadata{ - EnvironmentName: envOverride.Environment.Name, - ClusterName: envOverride.Environment.Cluster.ClusterName, - Namespace: envOverride.Environment.Namespace, - ImageTag: util3.GetImageTagFromImage(overrideRequest.Image), - AppName: overrideRequest.AppName, - Image: overrideRequest.Image, - }, - } - return scope + return helper.FetchRequiredReplicaCount(autoscalingReplicaCountVal, autoscalingMaxVal, autoscalingMinVal), nil } diff --git a/pkg/deployment/manifest/helper/helper.go b/pkg/deployment/manifest/helper/helper.go new file mode 100644 index 0000000000..d908ed1a81 --- /dev/null +++ b/pkg/deployment/manifest/helper/helper.go @@ -0,0 +1,155 @@ +package helper + +import ( + "errors" + "fmt" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/devtron-labs/devtron/api/bean" + "github.com/devtron-labs/devtron/internal/sql/models" + "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" + 
chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" + bean3 "github.com/devtron-labs/devtron/pkg/deployment/manifest/bean" + "github.com/devtron-labs/devtron/pkg/resourceQualifiers" + util3 "github.com/devtron-labs/devtron/pkg/util" + util4 "github.com/devtron-labs/devtron/util" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" +) + +func ResolveDeploymentTypeAndUpdate(overrideRequest *bean.ValuesOverrideRequest) { + if overrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { + overrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY + } + if len(overrideRequest.DeploymentWithConfig) == 0 { + overrideRequest.DeploymentWithConfig = bean.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED + } +} + +func GetDeploymentTemplateType(overrideRequest *bean.ValuesOverrideRequest) chartRepoRepository.DeploymentStrategy { + var deploymentTemplate chartRepoRepository.DeploymentStrategy + if overrideRequest.DeploymentTemplate == "ROLLING" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_ROLLING + } else if overrideRequest.DeploymentTemplate == "BLUE-GREEN" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_BLUE_GREEN + } else if overrideRequest.DeploymentTemplate == "CANARY" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_CANARY + } else if overrideRequest.DeploymentTemplate == "RECREATE" { + deploymentTemplate = chartRepoRepository.DEPLOYMENT_STRATEGY_RECREATE + } + return deploymentTemplate +} + +func ExtractParamValue(inputMap map[string]interface{}, key string, merged []byte) (float64, error) { + if _, ok := inputMap[key]; !ok { + return 0, errors.New("empty-val-err") + } + return util4.ParseFloatNumber(gjson.Get(string(merged), inputMap[key].(string)).Value()) +} + +func SetScalingValues(templateMap map[string]interface{}, customScalingKey string, merged []byte, value interface{}) ([]byte, error) { + autoscalingJsonPath := templateMap[customScalingKey] + autoscalingJsonPathKey := autoscalingJsonPath.(string) + mergedRes, err := sjson.Set(string(merged), autoscalingJsonPathKey, value) + if err != nil { + return []byte{}, err + } + return []byte(mergedRes), nil +} + +func FetchRequiredReplicaCount(currentReplicaCount float64, reqMaxReplicas float64, reqMinReplicas float64) float64 { + var reqReplicaCount float64 + if currentReplicaCount <= reqMaxReplicas && currentReplicaCount >= reqMinReplicas { + reqReplicaCount = currentReplicaCount + } else if currentReplicaCount > reqMaxReplicas { + reqReplicaCount = reqMaxReplicas + } else if currentReplicaCount < reqMinReplicas { + reqReplicaCount = reqMinReplicas + } + return reqReplicaCount +} + +func GetAutoScalingReplicaCount(templateMap map[string]interface{}, appName string) *util4.HpaResourceRequest { + hasOverride := false + if _, ok := templateMap[bean3.FullnameOverride]; ok { + appNameOverride := templateMap[bean3.FullnameOverride].(string) + if len(appNameOverride) > 0 { + appName = appNameOverride + hasOverride = true + } + } + if !hasOverride { + if _, ok := templateMap[bean3.NameOverride]; ok { + nameOverride := templateMap[bean3.NameOverride].(string) + if len(nameOverride) > 0 { + appName = fmt.Sprintf("%s-%s", appName, nameOverride) + } + } + } + hpaResourceRequest := &util4.HpaResourceRequest{} + hpaResourceRequest.Version = "" + hpaResourceRequest.Group = autoscaling.ServiceName + hpaResourceRequest.Kind = bean3.HorizontalPodAutoscaler + if _, ok := templateMap[bean3.KedaAutoscaling]; ok { + as := templateMap[bean3.KedaAutoscaling] + asd := 
as.(map[string]interface{}) + if _, ok := asd[bean3.Enabled]; ok { + enable := asd[bean3.Enabled].(bool) + if enable { + hpaResourceRequest.IsEnable = enable + hpaResourceRequest.ReqReplicaCount = templateMap[bean3.ReplicaCount].(float64) + hpaResourceRequest.ReqMaxReplicas = asd["maxReplicaCount"].(float64) + hpaResourceRequest.ReqMinReplicas = asd["minReplicaCount"].(float64) + hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s-%s", "keda-hpa", appName, "keda") + return hpaResourceRequest + } + } + } + + if _, ok := templateMap[autoscaling.ServiceName]; ok { + as := templateMap[autoscaling.ServiceName] + asd := as.(map[string]interface{}) + if _, ok := asd[bean3.Enabled]; ok { + enable := asd[bean3.Enabled].(bool) + if enable { + hpaResourceRequest.IsEnable = asd[bean3.Enabled].(bool) + hpaResourceRequest.ReqReplicaCount = templateMap[bean3.ReplicaCount].(float64) + hpaResourceRequest.ReqMaxReplicas = asd["MaxReplicas"].(float64) + hpaResourceRequest.ReqMinReplicas = asd["MinReplicas"].(float64) + hpaResourceRequest.ResourceName = fmt.Sprintf("%s-%s", appName, "hpa") + return hpaResourceRequest + } + } + } + return hpaResourceRequest + +} + +func CreateConfigMapAndSecretJsonRequest(overrideRequest *bean.ValuesOverrideRequest, envOverride *chartConfig.EnvConfigOverride, chartVersion string, scope resourceQualifiers.Scope) bean3.ConfigMapAndSecretJsonV2 { + request := bean3.ConfigMapAndSecretJsonV2{ + AppId: overrideRequest.AppId, + EnvId: envOverride.TargetEnvironment, + PipeLineId: overrideRequest.PipelineId, + ChartVersion: chartVersion, + DeploymentWithConfig: overrideRequest.DeploymentWithConfig, + WfrIdForDeploymentWithSpecificTrigger: overrideRequest.WfrIdForDeploymentWithSpecificTrigger, + Scope: scope, + } + return request +} + +func GetScopeForVariables(overrideRequest *bean.ValuesOverrideRequest, envOverride *chartConfig.EnvConfigOverride) resourceQualifiers.Scope { + scope := resourceQualifiers.Scope{ + AppId: overrideRequest.AppId, + EnvId: envOverride.TargetEnvironment, + ClusterId: envOverride.Environment.Id, + SystemMetadata: &resourceQualifiers.SystemMetadata{ + EnvironmentName: envOverride.Environment.Name, + ClusterName: envOverride.Environment.Cluster.ClusterName, + Namespace: envOverride.Environment.Namespace, + ImageTag: util3.GetImageTagFromImage(overrideRequest.Image), + AppName: overrideRequest.AppName, + Image: overrideRequest.Image, + }, + } + return scope +} diff --git a/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go b/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go index 55436508c6..9ddba1e858 100644 --- a/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go +++ b/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go @@ -2,58 +2,35 @@ package devtronApps import ( "context" - "fmt" bean2 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" - repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" - "github.com/devtron-labs/devtron/pkg/sql" util2 "github.com/devtron-labs/devtron/util/event" - "strconv" "time" ) func (impl *TriggerServiceImpl) TriggerPostStage(request bean.TriggerRequest) error { + request.WorkflowType = bean2.CD_WORKFLOW_TYPE_POST //setting triggeredAt variable to 
have consistent data for various audit log places in db for deployment time triggeredAt := time.Now() triggeredBy := request.TriggeredBy pipeline := request.Pipeline cdWf := request.CdWf - - runner := &pipelineConfig.CdWorkflowRunner{ - Name: pipeline.Name, - WorkflowType: bean2.CD_WORKFLOW_TYPE_POST, - ExecutorType: impl.config.GetWorkflowExecutorType(), - Status: pipelineConfig.WorkflowStarting, // starting PostStage - TriggeredBy: triggeredBy, - StartedOn: triggeredAt, - Namespace: impl.config.GetDefaultNamespace(), - BlobStorageEnabled: impl.config.BlobStorageEnabled, - CdWorkflowId: cdWf.Id, - LogLocation: fmt.Sprintf("%s/%s%s-%s/main.log", impl.config.GetDefaultBuildLogsKeyPrefix(), strconv.Itoa(cdWf.Id), string(bean2.CD_WORKFLOW_TYPE_POST), pipeline.Name), - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: triggeredBy, UpdatedOn: triggeredAt, UpdatedBy: triggeredBy}, - RefCdWorkflowRunnerId: request.RefCdWorkflowRunnerId, - ReferenceId: request.TriggerContext.ReferenceId, - } - var env *repository2.Environment - var err error - if pipeline.RunPostStageInEnv { - env, err = impl.envRepository.FindById(pipeline.EnvironmentId) - if err != nil { - impl.logger.Errorw(" unable to find env ", "err", err) - return err - } - runner.Namespace = env.Namespace + ctx := context.Background() //before there was only one context. To check why here we are not using ctx from request.TriggerContext + env, namespace, err := impl.getEnvAndNsIfRunStageInEnv(ctx, request) + if err != nil { + impl.logger.Errorw("error, getEnvAndNsIfRunStageInEnv", "err", err, "pipeline", pipeline, "stage", request.WorkflowType) + return nil } - - _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) + request.RunStageInEnvNamespace = namespace + cdWf, runner, err := impl.createStartingWfAndRunner(request, triggeredAt) if err != nil { + impl.logger.Errorw("error in creating wf starting and runner entry", "err", err, "request", request) return err } - if cdWf.CiArtifact == nil || cdWf.CiArtifact.Id == 0 { cdWf.CiArtifact, err = impl.ciArtifactRepository.Get(cdWf.CiArtifactId) if err != nil { @@ -77,26 +54,11 @@ func (impl *TriggerServiceImpl) TriggerPostStage(request bean.TriggerRequest) er } // custom GitOps repo url validation --> Ends - // checking vulnerability for the selected image - isVulnerable, err := impl.GetArtifactVulnerabilityStatus(cdWf.CiArtifact, pipeline, context.Background()) + err = impl.checkVulnerabilityStatusAndFailWfIfNeeded(ctx, cdWf.CiArtifact, pipeline, runner, triggeredBy) if err != nil { - impl.logger.Errorw("error in getting Artifact vulnerability status, TriggerPostStage", "err", err) + impl.logger.Errorw("error, checkVulnerabilityStatusAndFailWfIfNeeded", "err", err, "runner", runner) return err } - if isVulnerable { - // if image vulnerable, update timeline status and return - runner.Status = pipelineConfig.WorkflowFailed - runner.Message = pipelineConfig.FOUND_VULNERABILITY - runner.FinishedOn = time.Now() - runner.UpdatedOn = time.Now() - runner.UpdatedBy = triggeredBy - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) - if err != nil { - impl.logger.Errorw("error in updating wfr status due to vulnerable image", "err", err) - return err - } - return fmt.Errorf("found vulnerability for image digest %s", cdWf.CiArtifact.ImageDigest) - } cdStageWorkflowRequest, err := impl.buildWFRequest(runner, cdWf, pipeline, triggeredBy) if err != nil { impl.logger.Errorw("error in building wfRequest", "err", err, "runner", runner, "cdWf", cdWf, "pipeline", pipeline) diff --git 
a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go index 09af0061fa..12c85ebd8c 100644 --- a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go +++ b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go @@ -17,7 +17,7 @@ import ( "github.com/devtron-labs/devtron/pkg/imageDigestPolicy" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" - pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" + bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/plugin" @@ -48,58 +48,22 @@ const ( ) func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) error { + request.WorkflowType = bean2.CD_WORKFLOW_TYPE_PRE //setting triggeredAt variable to have consistent data for various audit log places in db for deployment time triggeredAt := time.Now() triggeredBy := request.TriggeredBy artifact := request.Artifact pipeline := request.Pipeline ctx := request.TriggerContext.Context - //in case of pre stage manual trigger auth is already applied and for auto triggers there is no need for auth check here - cdWf := request.CdWf - var err error - if cdWf == nil { - cdWf = &pipelineConfig.CdWorkflow{ - CiArtifactId: artifact.Id, - PipelineId: pipeline.Id, - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: 1, UpdatedOn: triggeredAt, UpdatedBy: 1}, - } - err = impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) - if err != nil { - return err - } - } - cdWorkflowExecutorType := impl.config.GetWorkflowExecutorType() - runner := &pipelineConfig.CdWorkflowRunner{ - Name: pipeline.Name, - WorkflowType: bean2.CD_WORKFLOW_TYPE_PRE, - ExecutorType: cdWorkflowExecutorType, - Status: pipelineConfig.WorkflowStarting, // starting PreStage - TriggeredBy: triggeredBy, - StartedOn: triggeredAt, - Namespace: impl.config.GetDefaultNamespace(), - BlobStorageEnabled: impl.config.BlobStorageEnabled, - CdWorkflowId: cdWf.Id, - LogLocation: fmt.Sprintf("%s/%s%s-%s/main.log", impl.config.GetDefaultBuildLogsKeyPrefix(), strconv.Itoa(cdWf.Id), string(bean2.CD_WORKFLOW_TYPE_PRE), pipeline.Name), - AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: 1, UpdatedOn: triggeredAt, UpdatedBy: 1}, - RefCdWorkflowRunnerId: request.RefCdWorkflowRunnerId, - ReferenceId: request.TriggerContext.ReferenceId, - } - var env *repository2.Environment - if pipeline.RunPreStageInEnv { - _, span := otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") - env, err = impl.envRepository.FindById(pipeline.EnvironmentId) - span.End() - if err != nil { - impl.logger.Errorw(" unable to find env ", "err", err) - return err - } - impl.logger.Debugw("env", "env", env) - runner.Namespace = env.Namespace + env, namespace, err := impl.getEnvAndNsIfRunStageInEnv(ctx, request) + if err != nil { + impl.logger.Errorw("error, getEnvAndNsIfRunStageInEnv", "err", err, "pipeline", pipeline, "stage", request.WorkflowType) + return nil } - _, span := otel.Tracer("orchestrator").Start(ctx, "cdWorkflowRepository.SaveWorkFlowRunner") - _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) - span.End() + request.RunStageInEnvNamespace = namespace + cdWf, runner, err := impl.createStartingWfAndRunner(request, triggeredAt) if err != nil { + impl.logger.Errorw("error in creating wf starting and runner entry", "err", 
err, "request", request) return err } @@ -111,28 +75,13 @@ func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) err } // custom GitOps repo url validation --> Ends - // checking vulnerability for the selected image - isVulnerable, err := impl.GetArtifactVulnerabilityStatus(artifact, pipeline, ctx) + //checking vulnerability for the selected image + err = impl.checkVulnerabilityStatusAndFailWfIfNeeded(ctx, artifact, pipeline, runner, triggeredBy) if err != nil { - impl.logger.Errorw("error in getting Artifact vulnerability status, TriggerPreStage", "err", err) + impl.logger.Errorw("error, checkVulnerabilityStatusAndFailWfIfNeeded", "err", err, "runner", runner) return err } - if isVulnerable { - // if image vulnerable, update timeline status and return - runner.Status = pipelineConfig.WorkflowFailed - runner.Message = pipelineConfig.FOUND_VULNERABILITY - runner.FinishedOn = time.Now() - runner.UpdatedOn = time.Now() - runner.UpdatedBy = triggeredBy - err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) - if err != nil { - impl.logger.Errorw("error in updating wfr status due to vulnerable image", "err", err) - return err - } - return fmt.Errorf("found vulnerability for image digest %s", artifact.ImageDigest) - } - - _, span = otel.Tracer("orchestrator").Start(ctx, "buildWFRequest") + _, span := otel.Tracer("orchestrator").Start(ctx, "buildWFRequest") cdStageWorkflowRequest, err := impl.buildWFRequest(runner, cdWf, pipeline, triggeredBy) span.End() if err != nil { @@ -154,7 +103,7 @@ func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) err _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") cdStageWorkflowRequest.Pipeline = pipeline cdStageWorkflowRequest.Env = env - cdStageWorkflowRequest.Type = pipelineConfigBean.CD_WORKFLOW_PIPELINE_TYPE + cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) span.End() err = impl.sendPreStageNotification(ctx, cdWf, pipeline) @@ -172,6 +121,99 @@ func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) err return nil } +func (impl *TriggerServiceImpl) createStartingWfAndRunner(request bean.TriggerRequest, triggeredAt time.Time) (*pipelineConfig.CdWorkflow, *pipelineConfig.CdWorkflowRunner, error) { + triggeredBy := request.TriggeredBy + artifact := request.Artifact + pipeline := request.Pipeline + ctx := request.TriggerContext.Context + //in case of pre stage manual trigger auth is already applied and for auto triggers there is no need for auth check here + cdWf := request.CdWf + var err error + if cdWf == nil && request.WorkflowType == bean2.CD_WORKFLOW_TYPE_PRE { + cdWf = &pipelineConfig.CdWorkflow{ + CiArtifactId: artifact.Id, + PipelineId: pipeline.Id, + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: 1, UpdatedOn: triggeredAt, UpdatedBy: 1}, + } + err = impl.cdWorkflowRepository.SaveWorkFlow(ctx, cdWf) + if err != nil { + return nil, nil, err + } + } + runner := &pipelineConfig.CdWorkflowRunner{ + Name: pipeline.Name, + WorkflowType: request.WorkflowType, + ExecutorType: impl.config.GetWorkflowExecutorType(), + Status: pipelineConfig.WorkflowStarting, // starting PreStage + TriggeredBy: triggeredBy, + StartedOn: triggeredAt, + Namespace: request.RunStageInEnvNamespace, + BlobStorageEnabled: impl.config.BlobStorageEnabled, + CdWorkflowId: cdWf.Id, + LogLocation: fmt.Sprintf("%s/%s%s-%s/main.log", impl.config.GetDefaultBuildLogsKeyPrefix(), 
strconv.Itoa(cdWf.Id), request.WorkflowType, pipeline.Name), + AuditLog: sql.AuditLog{CreatedOn: triggeredAt, CreatedBy: 1, UpdatedOn: triggeredAt, UpdatedBy: 1}, + RefCdWorkflowRunnerId: request.RefCdWorkflowRunnerId, + ReferenceId: request.TriggerContext.ReferenceId, + } + _, span := otel.Tracer("orchestrator").Start(ctx, "cdWorkflowRepository.SaveWorkFlowRunner") + _, err = impl.cdWorkflowRepository.SaveWorkFlowRunner(runner) + span.End() + if err != nil { + return nil, nil, err + } + return cdWf, runner, nil +} + +func (impl *TriggerServiceImpl) getEnvAndNsIfRunStageInEnv(ctx context.Context, request bean.TriggerRequest) (*repository2.Environment, string, error) { + workflowStage := request.WorkflowType + pipeline := request.Pipeline + var env *repository2.Environment + var err error + namespace := impl.config.GetDefaultNamespace() + runStageInEnv := false + if workflowStage == bean2.CD_WORKFLOW_TYPE_PRE { + runStageInEnv = pipeline.RunPreStageInEnv + } else if workflowStage == bean2.CD_WORKFLOW_TYPE_POST { + runStageInEnv = pipeline.RunPostStageInEnv + } + _, span := otel.Tracer("orchestrator").Start(ctx, "envRepository.FindById") + env, err = impl.envRepository.FindById(pipeline.EnvironmentId) + span.End() + if err != nil { + impl.logger.Errorw(" unable to find env ", "err", err) + return nil, namespace, err + } + if runStageInEnv { + namespace = env.Namespace + } + return env, namespace, nil +} + +func (impl *TriggerServiceImpl) checkVulnerabilityStatusAndFailWfIfNeeded(ctx context.Context, artifact *repository.CiArtifact, + cdPipeline *pipelineConfig.Pipeline, runner *pipelineConfig.CdWorkflowRunner, triggeredBy int32) error { + //checking vulnerability for the selected image + isVulnerable, err := impl.GetArtifactVulnerabilityStatus(artifact, cdPipeline, ctx) + if err != nil { + impl.logger.Errorw("error in getting Artifact vulnerability status, TriggerPreStage", "err", err) + return err + } + if isVulnerable { + // if image vulnerable, update timeline status and return + runner.Status = pipelineConfig.WorkflowFailed + runner.Message = pipelineConfig.FOUND_VULNERABILITY + runner.FinishedOn = time.Now() + runner.UpdatedOn = time.Now() + runner.UpdatedBy = triggeredBy + err = impl.cdWorkflowRepository.UpdateWorkFlowRunner(runner) + if err != nil { + impl.logger.Errorw("error in updating wfr status due to vulnerable image", "err", err) + return err + } + return fmt.Errorf("found vulnerability for image digest %s", artifact.ImageDigest) + } + return nil +} + func (impl *TriggerServiceImpl) SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest *types.WorkflowRequest, pipelineId int, pipelineStage string, artifact *repository.CiArtifact) ([]int, error) { copyContainerImagePluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(pipeline.COPY_CONTAINER_IMAGE) var imagePathReservationIds []int @@ -183,9 +225,9 @@ func (impl *TriggerServiceImpl) SetCopyContainerImagePluginDataInWorkflowRequest if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { var pipelineStageEntityType int if pipelineStage == types.PRE { - pipelineStageEntityType = pipelineConfigBean.EntityTypePreCD + pipelineStageEntityType = bean3.EntityTypePreCD } else { - pipelineStageEntityType = pipelineConfigBean.EntityTypePostCD + pipelineStageEntityType = bean3.EntityTypePostCD } customTagId := -1 var DockerImageTag string @@ -240,7 +282,7 @@ func (impl *TriggerServiceImpl) SetCopyContainerImagePluginDataInWorkflowRequest } if len(savedCIArtifacts) > 0 { // if 
already present in ci artifact, return "image path already in use error" - return imagePathReservationIds, pipelineConfigBean.ErrImagePathInUse + return imagePathReservationIds, bean3.ErrImagePathInUse } imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { @@ -294,7 +336,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow return nil, err } - var ciProjectDetails []pipelineConfigBean.CiProjectDetails + var ciProjectDetails []bean3.CiProjectDetails var ciPipeline *pipelineConfig.CiPipeline if cdPipeline.CiPipelineId > 0 { ciPipeline, err = impl.ciPipelineRepository.FindById(cdPipeline.CiPipelineId) @@ -323,7 +365,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow return nil, err } - ciProjectDetail := pipelineConfigBean.CiProjectDetails{ + ciProjectDetail := bean3.CiProjectDetails{ GitRepository: ciMaterialCurrent.Material.GitConfiguration.URL, MaterialName: gitMaterial.Name, CheckoutPath: gitMaterial.CheckoutPath, @@ -331,7 +373,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow SourceType: m.Type, SourceValue: m.Value, Type: string(m.Type), - GitOptions: pipelineConfigBean.GitOptions{ + GitOptions: bean3.GitOptions{ UserName: gitMaterial.GitProvider.UserName, Password: gitMaterial.GitProvider.Password, SshPrivateKey: gitMaterial.GitProvider.SshPrivateKey, @@ -350,7 +392,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow return nil, err } ciProjectDetail.CommitTime = commitTime.Format(bean4.LayoutRFC3339) - } else if ciPipeline.PipelineType == string(pipelineConfigBean.CI_JOB) { + } else if ciPipeline.PipelineType == string(bean3.CI_JOB) { // This has been done to resolve unmarshalling issue in ci-runner, in case of no commit time(eg- polling container images) ciProjectDetail.CommitTime = time.Time{}.Format(bean4.LayoutRFC3339) } else { @@ -375,9 +417,9 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow var deployStageWfr pipelineConfig.CdWorkflowRunner var deployStageTriggeredByUserEmail string var pipelineReleaseCounter int - var preDeploySteps []*pipelineConfigBean.StepObject - var postDeploySteps []*pipelineConfigBean.StepObject - var refPluginsData []*pipelineConfigBean.RefPluginObject + var preDeploySteps []*bean3.StepObject + var postDeploySteps []*bean3.StepObject + var refPluginsData []*bean3.RefPluginObject //if pipeline_stage_steps present for pre-CD or post-CD then no need to add stageYaml to cdWorkflowRequest in that //case add PreDeploySteps and PostDeploySteps to cdWorkflowRequest, this is done for backward compatibility pipelineStage, err := impl.pipelineStageService.GetCdStageByCdPipelineIdAndStageType(cdPipeline.Id, runner.WorkflowType.WorkflowTypeToStageType()) @@ -394,7 +436,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow //Scope will pick the environment of CD pipeline irrespective of in-cluster mode, //since user sees the environment of the CD pipeline scope := resourceQualifiers.Scope{ - AppId: cdPipeline.AppId, + AppId: cdPipeline.App.Id, EnvId: env.Id, ClusterId: env.ClusterId, SystemMetadata: &resourceQualifiers.SystemMetadata{ @@ -870,15 +912,15 @@ func (impl *TriggerServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, return imagePathReservationIds, nil } -func setExtraEnvVariableInDeployStep(deploySteps []*pipelineConfigBean.StepObject, extraEnvVariables map[string]string, 
webhookAndCiData *gitSensorClient.WebhookAndCiData) { +func setExtraEnvVariableInDeployStep(deploySteps []*bean3.StepObject, extraEnvVariables map[string]string, webhookAndCiData *gitSensorClient.WebhookAndCiData) { for _, deployStep := range deploySteps { for variableKey, variableValue := range extraEnvVariables { if isExtraVariableDynamic(variableKey, webhookAndCiData) && deployStep.StepType == "INLINE" { - extraInputVar := &pipelineConfigBean.VariableObject{ + extraInputVar := &bean3.VariableObject{ Name: variableKey, Format: "STRING", Value: variableValue, - VariableType: pipelineConfigBean.VARIABLE_TYPE_REF_GLOBAL, + VariableType: bean3.VARIABLE_TYPE_REF_GLOBAL, ReferenceVariableName: variableKey, } deployStep.InputVars = append(deployStep.InputVars, extraInputVar) diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index 995659f69f..81b21d1852 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -36,6 +36,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "github.com/devtron-labs/devtron/pkg/deployment/manifest" bean5 "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" + "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/adapter" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/helper" clientErrors "github.com/devtron-labs/devtron/pkg/errors" @@ -81,9 +82,6 @@ type TriggerService interface { TriggerRelease(overrideRequest *bean3.ValuesOverrideRequest, valuesOverrideResponse *app.ValuesOverrideResponse, builtChartPath string, ctx context.Context, triggeredAt time.Time, triggeredBy int32) (releaseNo int, manifest []byte, err error) - - //TODO: make this method private and move all usages in this service since TriggerService should own if async mode is enabled and if yes then how to act on it - IsDevtronAsyncInstallModeEnabled(deploymentAppType string) bool } type TriggerServiceImpl struct { @@ -295,7 +293,7 @@ func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerConte impl.logger.Errorw("manual trigger request with invalid pipelineId, ManualCdTrigger", "pipelineId", overrideRequest.PipelineId, "err", err) return 0, err } - SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline) + adapter.SetPipelineFieldsInOverrideRequest(overrideRequest, cdPipeline) switch overrideRequest.CdWorkflowType { case bean3.CD_WORKFLOW_TYPE_PRE: @@ -431,7 +429,7 @@ func (impl *TriggerServiceImpl) ManualCdTrigger(triggerContext bean.TriggerConte } // skip updatePreviousDeploymentStatus if Async Install is enabled; handled inside SubscribeDevtronAsyncHelmInstallRequest - if !impl.IsDevtronAsyncInstallModeEnabled(cdPipeline.DeploymentAppType) { + if !impl.isDevtronAsyncInstallModeEnabled(cdPipeline.DeploymentAppType) { // Update previous deployment runner status (in transaction): Failed _, span = otel.Tracer("orchestrator").Start(ctx, "updatePreviousDeploymentStatus") err1 := impl.cdWorkflowCommonService.UpdatePreviousDeploymentStatus(runner, cdPipeline.Id, triggeredAt, overrideRequest.UserId) @@ -625,7 +623,7 @@ func (impl *TriggerServiceImpl) TriggerAutomaticDeployment(request bean.TriggerR return releaseErr } //skip updatePreviousDeploymentStatus if Async Install is enabled; handled inside SubscribeDevtronAsyncHelmInstallRequest - if 
!impl.IsDevtronAsyncInstallModeEnabled(pipeline.DeploymentAppType) { + if !impl.isDevtronAsyncInstallModeEnabled(pipeline.DeploymentAppType) { err1 := impl.cdWorkflowCommonService.UpdatePreviousDeploymentStatus(runner, pipeline.Id, triggeredAt, triggeredBy) if err1 != nil { impl.logger.Errorw("error while update previous cd workflow runners", "err", err, "runner", runner, "pipelineId", pipeline.Id) @@ -681,7 +679,7 @@ func (impl *TriggerServiceImpl) releasePipeline(pipeline *pipelineConfig.Pipelin DeploymentWithConfig: bean3.DEPLOYMENT_CONFIG_TYPE_LAST_SAVED, WfrId: wfrId, } - SetPipelineFieldsInOverrideRequest(request, pipeline) + adapter.SetPipelineFieldsInOverrideRequest(request, pipeline) ctx, err := impl.argoUserService.BuildACDContext() if err != nil { @@ -698,20 +696,9 @@ func (impl *TriggerServiceImpl) releasePipeline(pipeline *pipelineConfig.Pipelin return err } -func SetPipelineFieldsInOverrideRequest(overrideRequest *bean3.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) { - overrideRequest.PipelineId = pipeline.Id - overrideRequest.PipelineName = pipeline.Name - overrideRequest.EnvId = pipeline.EnvironmentId - overrideRequest.EnvName = pipeline.Environment.Name - overrideRequest.ClusterId = pipeline.Environment.ClusterId - overrideRequest.AppId = pipeline.AppId - overrideRequest.AppName = pipeline.App.AppName - overrideRequest.DeploymentAppType = pipeline.DeploymentAppType -} - func (impl *TriggerServiceImpl) HandleCDTriggerRelease(overrideRequest *bean3.ValuesOverrideRequest, ctx context.Context, triggeredAt time.Time, deployedBy int32) (releaseNo int, manifest []byte, err error) { - if impl.IsDevtronAsyncInstallModeEnabled(overrideRequest.DeploymentAppType) { + if impl.isDevtronAsyncInstallModeEnabled(overrideRequest.DeploymentAppType) { // asynchronous mode of installation starts return impl.workflowEventPublishService.TriggerHelmAsyncRelease(overrideRequest, ctx, triggeredAt, deployedBy) } @@ -986,7 +973,7 @@ func (impl *TriggerServiceImpl) createHelmAppForCdPipeline(overrideRequest *bean HistoryMax: impl.helmAppService.GetRevisionHistoryMaxValue(bean6.SOURCE_DEVTRON_APP), ChartContent: &gRPC.ChartContent{Content: referenceChartByte}, } - if impl.IsDevtronAsyncInstallModeEnabled(bean.Helm) { + if impl.isDevtronAsyncInstallModeEnabled(bean.Helm) { req.RunInCtx = true } // For cases where helm release was not found, kubelink will install the same configuration @@ -1181,7 +1168,7 @@ func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(appId int, envCo TargetNamespace: appNamespace, TargetServer: envModel.Cluster.ServerUrl, Project: "default", - ValuesFile: getValuesFileForEnv(envModel.Id), + ValuesFile: helper.GetValuesFileForEnv(envModel.Id), RepoPath: chart.ChartLocation, RepoUrl: chart.GitRepoUrl, AutoSyncEnabled: impl.ACDConfig.ArgoCDAutoSyncEnabled, @@ -1200,10 +1187,6 @@ func (impl *TriggerServiceImpl) createArgoApplicationIfRequired(appId int, envCo } } -func getValuesFileForEnv(environmentId int) string { - return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml -} - func (impl *TriggerServiceImpl) updatePipeline(pipeline *pipelineConfig.Pipeline, userId int32) (bool, error) { err := impl.pipelineRepository.SetDeploymentAppCreatedInPipeline(true, pipeline.Id, userId) if err != nil { @@ -1221,7 +1204,7 @@ func (impl *TriggerServiceImpl) helmInstallReleaseWithCustomChart(ctx context.Co ChartContent: &gRPC.ChartContent{Content: referenceChartByte}, ReleaseIdentifier: releaseIdentifier, } - if 
impl.IsDevtronAsyncInstallModeEnabled(bean.Helm) { + if impl.isDevtronAsyncInstallModeEnabled(bean.Helm) { helmInstallRequest.RunInCtx = true } // Request exec @@ -1325,7 +1308,7 @@ func (impl *TriggerServiceImpl) markImageScanDeployed(appId int, envId int, imag return err } -func (impl *TriggerServiceImpl) IsDevtronAsyncInstallModeEnabled(deploymentAppType string) bool { +func (impl *TriggerServiceImpl) isDevtronAsyncInstallModeEnabled(deploymentAppType string) bool { return impl.globalEnvVariables.EnableAsyncInstallDevtronChart && deploymentAppType == bean.Helm } diff --git a/pkg/deployment/trigger/devtronApps/adapter/adapter.go b/pkg/deployment/trigger/devtronApps/adapter/adapter.go new file mode 100644 index 0000000000..4144c266c1 --- /dev/null +++ b/pkg/deployment/trigger/devtronApps/adapter/adapter.go @@ -0,0 +1,17 @@ +package adapter + +import ( + bean3 "github.com/devtron-labs/devtron/api/bean" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" +) + +func SetPipelineFieldsInOverrideRequest(overrideRequest *bean3.ValuesOverrideRequest, pipeline *pipelineConfig.Pipeline) { + overrideRequest.PipelineId = pipeline.Id + overrideRequest.PipelineName = pipeline.Name + overrideRequest.EnvId = pipeline.EnvironmentId + overrideRequest.EnvName = pipeline.Environment.Name + overrideRequest.ClusterId = pipeline.Environment.ClusterId + overrideRequest.AppId = pipeline.AppId + overrideRequest.AppName = pipeline.App.AppName + overrideRequest.DeploymentAppType = pipeline.DeploymentAppType +} diff --git a/pkg/deployment/trigger/devtronApps/bean/bean.go b/pkg/deployment/trigger/devtronApps/bean/bean.go index 8fd4c283ae..e4cde70cc3 100644 --- a/pkg/deployment/trigger/devtronApps/bean/bean.go +++ b/pkg/deployment/trigger/devtronApps/bean/bean.go @@ -2,6 +2,7 @@ package bean import ( "context" + "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "time" @@ -22,12 +23,14 @@ type TriggerEvent struct { } type TriggerRequest struct { - CdWf *pipelineConfig.CdWorkflow - Pipeline *pipelineConfig.Pipeline - Artifact *repository.CiArtifact - ApplyAuth bool - TriggeredBy int32 - RefCdWorkflowRunnerId int + CdWf *pipelineConfig.CdWorkflow + Pipeline *pipelineConfig.Pipeline + Artifact *repository.CiArtifact + ApplyAuth bool + TriggeredBy int32 + RefCdWorkflowRunnerId int + RunStageInEnvNamespace string + WorkflowType bean.WorkflowType TriggerContext } diff --git a/pkg/deployment/trigger/devtronApps/helper/helper.go b/pkg/deployment/trigger/devtronApps/helper/helper.go index a46b63f624..6865bbcc02 100644 --- a/pkg/deployment/trigger/devtronApps/helper/helper.go +++ b/pkg/deployment/trigger/devtronApps/helper/helper.go @@ -2,12 +2,17 @@ package helper import ( errors3 "errors" + "fmt" bean2 "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" errors2 "github.com/juju/errors" "time" ) +func GetValuesFileForEnv(environmentId int) string { + return fmt.Sprintf("_%d-values.yaml", environmentId) //-{envId}-values.yaml +} + func GetTriggerEvent(deploymentAppType string, triggeredAt time.Time, deployedBy int32) bean.TriggerEvent { // trigger event will decide whether to perform GitOps or deployment for a particular deployment app type triggerEvent := bean.TriggerEvent{ diff --git a/pkg/eventProcessor/CentralEventProcessorService.go b/pkg/eventProcessor/CentralEventProcessorService.go index af65c90c38..784b5c339b 
100644 --- a/pkg/eventProcessor/CentralEventProcessorService.go +++ b/pkg/eventProcessor/CentralEventProcessorService.go @@ -6,15 +6,27 @@ import ( ) type CentralEventProcessor struct { - logger *zap.SugaredLogger - workflowEventProcessor *in.WorkflowEventProcessorImpl + logger *zap.SugaredLogger + workflowEventProcessor *in.WorkflowEventProcessorImpl + ciPipelineEventProcessor *in.CIPipelineEventProcessorImpl + cdPipelineEventProcessor *in.CDPipelineEventProcessorImpl + deployedApplicationEventProcessorImpl *in.DeployedApplicationEventProcessorImpl + appStoreAppsEventProcessorImpl *in.AppStoreAppsEventProcessorImpl } -func NewCentralEventProcessor(workflowEventProcessor *in.WorkflowEventProcessorImpl, - logger *zap.SugaredLogger) (*CentralEventProcessor, error) { +func NewCentralEventProcessor(logger *zap.SugaredLogger, + workflowEventProcessor *in.WorkflowEventProcessorImpl, + ciPipelineEventProcessor *in.CIPipelineEventProcessorImpl, + cdPipelineEventProcessor *in.CDPipelineEventProcessorImpl, + deployedApplicationEventProcessorImpl *in.DeployedApplicationEventProcessorImpl, + appStoreAppsEventProcessorImpl *in.AppStoreAppsEventProcessorImpl) (*CentralEventProcessor, error) { cep := &CentralEventProcessor{ - workflowEventProcessor: workflowEventProcessor, - logger: logger, + logger: logger, + workflowEventProcessor: workflowEventProcessor, + ciPipelineEventProcessor: ciPipelineEventProcessor, + cdPipelineEventProcessor: cdPipelineEventProcessor, + deployedApplicationEventProcessorImpl: deployedApplicationEventProcessorImpl, + appStoreAppsEventProcessorImpl: appStoreAppsEventProcessorImpl, } err := cep.SubscribeAll() if err != nil { @@ -25,6 +37,33 @@ func NewCentralEventProcessor(workflowEventProcessor *in.WorkflowEventProcessorI func (impl *CentralEventProcessor) SubscribeAll() error { var err error + + //CI pipeline event starts + err = impl.ciPipelineEventProcessor.SubscribeNewCIMaterialEvent() + if err != nil { + impl.logger.Errorw("error, SubscribeNewCIMaterialEvent", "err", err) + return err + } + //CI pipeline event ends + + //CD pipeline event starts + + err = impl.cdPipelineEventProcessor.SubscribeCDBulkTriggerTopic() + if err != nil { + impl.logger.Errorw("error, SubscribeCDBulkTriggerTopic", "err", err) + return err + } + + err = impl.cdPipelineEventProcessor.SubscribeArgoTypePipelineSyncEvent() + if err != nil { + impl.logger.Errorw("error, SubscribeArgoTypePipelineSyncEvent", "err", err) + return err + } + + //CD pipeline event ends + + //Workflow event starts + err = impl.workflowEventProcessor.SubscribeCDStageCompleteEvent() if err != nil { impl.logger.Errorw("error, SubscribeCDStageCompleteEvent", "err", err) @@ -55,5 +94,49 @@ func (impl *CentralEventProcessor) SubscribeAll() error { impl.logger.Errorw("error, SubscribeCICompleteEvent", "err", err) return err } + err = impl.workflowEventProcessor.SubscribeDevtronAsyncHelmInstallRequest() + if err != nil { + impl.logger.Errorw("error, SubscribeDevtronAsyncHelmInstallRequest", "err", err) + return err + } + err = impl.workflowEventProcessor.SubscribeCDPipelineDeleteEvent() + if err != nil { + impl.logger.Errorw("error, SubscribeCDPipelineDeleteEvent", "err", err) + return err + } + + //Workflow event ends + + //Deployed application status event starts (currently only argo) + + err = impl.deployedApplicationEventProcessorImpl.SubscribeArgoAppUpdate() + if err != nil { + impl.logger.Errorw("error, SubscribeArgoAppUpdate", "err", err) + return err + } + err = 
impl.deployedApplicationEventProcessorImpl.SubscribeArgoAppDeleteStatus() + if err != nil { + impl.logger.Errorw("error, SubscribeArgoAppDeleteStatus", "err", err) + return err + } + + //Deployed application status event ends (currently only argo) + + //AppStore apps event starts + + err = impl.appStoreAppsEventProcessorImpl.SubscribeAppStoreAppsBulkDeployEvent() + if err != nil { + impl.logger.Errorw("error, SubscribeAppStoreAppsBulkDeployEvent", "err", err) + return err + } + + err = impl.appStoreAppsEventProcessorImpl.SubscribeHelmInstallStatusEvent() + if err != nil { + impl.logger.Errorw("error, SubscribeHelmInstallStatusEvent", "err", err) + return err + } + + //AppStore apps event ends + return nil } diff --git a/pkg/eventProcessor/bean/appStoreAppsEventBean.go b/pkg/eventProcessor/bean/appStoreAppsEventBean.go new file mode 100644 index 0000000000..189cc64c9a --- /dev/null +++ b/pkg/eventProcessor/bean/appStoreAppsEventBean.go @@ -0,0 +1,6 @@ +package bean + +type BulkDeployPayload struct { + InstalledAppVersionId int + InstalledAppVersionHistoryId int +} diff --git a/pkg/eventProcessor/bean/cdPipelineEventBean.go b/pkg/eventProcessor/bean/cdPipelineEventBean.go new file mode 100644 index 0000000000..504ab889a5 --- /dev/null +++ b/pkg/eventProcessor/bean/cdPipelineEventBean.go @@ -0,0 +1,8 @@ +package bean + +import "github.com/devtron-labs/devtron/api/bean" + +type BulkCDDeployEvent struct { + ValuesOverrideRequest *bean.ValuesOverrideRequest `json:"valuesOverrideRequest"` //TODO migrate this + UserId int32 `json:"userId"` +} diff --git a/pkg/eventProcessor/bean/deployedApplicationEventBean.go b/pkg/eventProcessor/bean/deployedApplicationEventBean.go new file mode 100644 index 0000000000..4e7f74cb5a --- /dev/null +++ b/pkg/eventProcessor/bean/deployedApplicationEventBean.go @@ -0,0 +1,18 @@ +package bean + +import ( + v1alpha12 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + "time" +) + +type ApplicationDetail struct { + Application *v1alpha12.Application `json:"application"` + StatusTime time.Time `json:"statusTime"` +} + +type ArgoPipelineStatusSyncEvent struct { + PipelineId int `json:"pipelineId"` + InstalledAppVersionId int `json:"installedAppVersionId"` + UserId int32 `json:"userId"` + IsAppStoreApplication bool `json:"isAppStoreApplication"` +} diff --git a/pkg/eventProcessor/bean/workflowEventBean.go b/pkg/eventProcessor/bean/workflowEventBean.go index 610fb33ba9..7534bc5459 100644 --- a/pkg/eventProcessor/bean/workflowEventBean.go +++ b/pkg/eventProcessor/bean/workflowEventBean.go @@ -1,6 +1,7 @@ package bean import ( + "context" "github.com/aws/aws-sdk-go-v2/service/ecr/types" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -51,3 +52,8 @@ type CiCompleteEvent struct { PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` PluginArtifactStage string `json:"pluginArtifactStage"` } + +type DevtronAppReleaseContextType struct { + CancelContext context.CancelFunc + RunnerId int +} diff --git a/pkg/eventProcessor/in/AppStoreAppsEventProcessorService.go b/pkg/eventProcessor/in/AppStoreAppsEventProcessorService.go new file mode 100644 index 0000000000..2b4775f205 --- /dev/null +++ b/pkg/eventProcessor/in/AppStoreAppsEventProcessorService.go @@ -0,0 +1,113 @@ +package in + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/pubsub-lib/model" + 
"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" + "github.com/devtron-labs/devtron/pkg/appStore/chartGroup" + "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" + "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" + "go.uber.org/zap" +) + +type AppStoreAppsEventProcessorImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl + chartGroupService chartGroup.ChartGroupService + + iavHistoryRepository repository.InstalledAppVersionHistoryRepository +} + +func NewAppStoreAppsEventProcessorImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl, + chartGroupService chartGroup.ChartGroupService, + iavHistoryRepository repository.InstalledAppVersionHistoryRepository) *AppStoreAppsEventProcessorImpl { + return &AppStoreAppsEventProcessorImpl{ + logger: logger, + pubSubClient: pubSubClient, + chartGroupService: chartGroupService, + iavHistoryRepository: iavHistoryRepository, + } +} + +func (impl *AppStoreAppsEventProcessorImpl) SubscribeAppStoreAppsBulkDeployEvent() error { + callback := func(msg *model.PubSubMsg) { + deployPayload := &bean.BulkDeployPayload{} + err := json.Unmarshal([]byte(msg.Data), &deployPayload) + if err != nil { + impl.logger.Error("Error while unmarshalling deployPayload json object", "error", err) + return + } + impl.logger.Debugw("deployPayload:", "deployPayload", deployPayload) + //using userId 1 - for system user + _, err = impl.chartGroupService.PerformDeployStage(deployPayload.InstalledAppVersionId, deployPayload.InstalledAppVersionHistoryId, 1) + if err != nil { + impl.logger.Errorw("error in performing deploy stage", "deployPayload", deployPayload, "err", err) + } + } + + // add required logging here + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + deployPayload := &bean.BulkDeployPayload{} + err := json.Unmarshal([]byte(msg.Data), &deployPayload) + if err != nil { + return "error while unmarshalling deployPayload json object", []interface{}{"error", err} + } + return "got message for deploy app-store apps in bulk", []interface{}{"installedAppVersionId", deployPayload.InstalledAppVersionId, "installedAppVersionHistoryId", deployPayload.InstalledAppVersionHistoryId} + } + + err := impl.pubSubClient.Subscribe(pubsub.BULK_APPSTORE_DEPLOY_TOPIC, callback, loggerFunc) + if err != nil { + impl.logger.Error("err", err) + return err + } + return nil +} + +func (impl *AppStoreAppsEventProcessorImpl) SubscribeHelmInstallStatusEvent() error { + + callback := func(msg *model.PubSubMsg) { + + helmInstallNatsMessage := &appStoreBean.HelmReleaseStatusConfig{} + err := json.Unmarshal([]byte(msg.Data), helmInstallNatsMessage) + if err != nil { + impl.logger.Errorw("error in unmarshalling helm install status nats message", "err", err) + return + } + + installedAppVersionHistory, err := impl.iavHistoryRepository.GetInstalledAppVersionHistory(helmInstallNatsMessage.InstallAppVersionHistoryId) + if err != nil { + impl.logger.Errorw("error in fetching installed app by installed app id in subscribe helm status callback", "err", err) + return + } + if helmInstallNatsMessage.ErrorInInstallation { + installedAppVersionHistory.Status = pipelineConfig.WorkflowFailed + } else { + installedAppVersionHistory.Status = pipelineConfig.WorkflowSucceeded + } + installedAppVersionHistory.HelmReleaseStatusConfig = msg.Data + _, err = 
impl.iavHistoryRepository.UpdateInstalledAppVersionHistory(installedAppVersionHistory, nil) + if err != nil { + impl.logger.Errorw("error in updating helm release status data in installedAppVersionHistoryRepository", "err", err) + return + } + } + // add required logging here + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + helmInstallNatsMessage := &appStoreBean.HelmReleaseStatusConfig{} + err := json.Unmarshal([]byte(msg.Data), helmInstallNatsMessage) + if err != nil { + return "error in unmarshalling helm install status nats message", []interface{}{"err", err} + } + return "got nats msg for helm chart install status", []interface{}{"InstallAppVersionHistoryId", helmInstallNatsMessage.InstallAppVersionHistoryId, "ErrorInInstallation", helmInstallNatsMessage.ErrorInInstallation, "IsReleaseInstalled", helmInstallNatsMessage.IsReleaseInstalled} + } + + err := impl.pubSubClient.Subscribe(pubsub.HELM_CHART_INSTALL_STATUS_TOPIC, callback, loggerFunc) + if err != nil { + impl.logger.Error(err) + return err + } + return nil +} diff --git a/pkg/eventProcessor/in/CDPipelineEventProcessorService.go b/pkg/eventProcessor/in/CDPipelineEventProcessorService.go new file mode 100644 index 0000000000..3e04d335b9 --- /dev/null +++ b/pkg/eventProcessor/in/CDPipelineEventProcessorService.go @@ -0,0 +1,149 @@ +package in + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/pubsub-lib/model" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + repository2 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" + "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps" + bean2 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" + "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" + "github.com/devtron-labs/devtron/pkg/workflow/cd" + "github.com/devtron-labs/devtron/pkg/workflow/status" + "github.com/devtron-labs/devtron/util/argo" + "go.uber.org/zap" + "k8s.io/utils/pointer" +) + +type CDPipelineEventProcessorImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl + cdWorkflowCommonService cd.CdWorkflowCommonService + workflowStatusService status.WorkflowStatusService + cdTriggerService devtronApps.TriggerService + argoUserService argo.ArgoUserService + + pipelineRepository pipelineConfig.PipelineRepository + installedAppRepository repository2.InstalledAppRepository +} + +func NewCDPipelineEventProcessorImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl, + cdWorkflowCommonService cd.CdWorkflowCommonService, + workflowStatusService status.WorkflowStatusService, + cdTriggerService devtronApps.TriggerService, + argoUserService argo.ArgoUserService, + pipelineRepository pipelineConfig.PipelineRepository, + installedAppRepository repository2.InstalledAppRepository) *CDPipelineEventProcessorImpl { + cdPipelineEventProcessorImpl := &CDPipelineEventProcessorImpl{ + logger: logger, + pubSubClient: pubSubClient, + cdWorkflowCommonService: cdWorkflowCommonService, + workflowStatusService: workflowStatusService, + cdTriggerService: cdTriggerService, + argoUserService: argoUserService, + pipelineRepository: pipelineRepository, + installedAppRepository: installedAppRepository, + } + return cdPipelineEventProcessorImpl +} + +func (impl *CDPipelineEventProcessorImpl) SubscribeCDBulkTriggerTopic() error { + + callback := func(msg *model.PubSubMsg) { + event := 
&bean.BulkCDDeployEvent{} + err := json.Unmarshal([]byte(msg.Data), event) + if err != nil { + impl.logger.Errorw("Error unmarshalling received event", "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, "msg", msg.Data, "err", err) + return + } + event.ValuesOverrideRequest.UserId = event.UserId + // trigger + ctx, err := impl.argoUserService.BuildACDContext() + if err != nil { + impl.logger.Errorw("error in creating acd context", "err", err) + return + } + triggerContext := bean2.TriggerContext{ + ReferenceId: pointer.String(msg.MsgId), + Context: ctx, + } + _, err = impl.cdTriggerService.ManualCdTrigger(triggerContext, event.ValuesOverrideRequest) + if err != nil { + impl.logger.Errorw("Error triggering CD", "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, "msg", msg.Data, "err", err) + } + } + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + event := &bean.BulkCDDeployEvent{} + err := json.Unmarshal([]byte(msg.Data), event) + if err != nil { + return "error unmarshalling received event", []interface{}{"msg", msg.Data, "err", err} + } + return "got message for trigger cd in bulk", []interface{}{"pipelineId", event.ValuesOverrideRequest.PipelineId, "appId", event.ValuesOverrideRequest.AppId, "cdWorkflowType", event.ValuesOverrideRequest.CdWorkflowType, "ciArtifactId", event.ValuesOverrideRequest.CiArtifactId} + } + validations := impl.cdWorkflowCommonService.GetTriggerValidateFuncs() + err := impl.pubSubClient.Subscribe(pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, callback, loggerFunc, validations...) + if err != nil { + impl.logger.Error("failed to subscribe to NATS topic", "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, "err", err) + return err + } + return nil +} + +func (impl *CDPipelineEventProcessorImpl) SubscribeArgoTypePipelineSyncEvent() error { + callback := func(msg *model.PubSubMsg) { + statusUpdateEvent := bean.ArgoPipelineStatusSyncEvent{} + var err error + var cdPipeline *pipelineConfig.Pipeline + var installedApp repository2.InstalledApps + + err = json.Unmarshal([]byte(msg.Data), &statusUpdateEvent) + if err != nil { + impl.logger.Errorw("unmarshal error on argo pipeline status update event", "err", err) + return + } + + if statusUpdateEvent.IsAppStoreApplication { + installedApp, err = impl.installedAppRepository.GetInstalledAppByInstalledAppVersionId(statusUpdateEvent.InstalledAppVersionId) + if err != nil { + impl.logger.Errorw("error in getting installedAppVersion by id", "err", err, "id", statusUpdateEvent.PipelineId) + return + } + } else { + cdPipeline, err = impl.pipelineRepository.FindById(statusUpdateEvent.PipelineId) + if err != nil { + impl.logger.Errorw("error in getting cdPipeline by id", "err", err, "id", statusUpdateEvent.PipelineId) + return + } + } + + triggerContext := bean2.TriggerContext{ + ReferenceId: pointer.String(msg.MsgId), + } + + err, _ = impl.workflowStatusService.UpdatePipelineTimelineAndStatusByLiveApplicationFetch(triggerContext, cdPipeline, installedApp, statusUpdateEvent.UserId) + if err != nil { + impl.logger.Errorw("error on argo pipeline status update", "err", err, "msg", msg.Data) + return + } + } + + // add required logging here + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + statusUpdateEvent := bean.ArgoPipelineStatusSyncEvent{} + err := json.Unmarshal([]byte(msg.Data), &statusUpdateEvent) + if err != nil { + return "unmarshal error on argo pipeline status update event", []interface{}{"err", err} + } + return "got message for argo pipeline status update", 
[]interface{}{"pipelineId", statusUpdateEvent.PipelineId, "installedAppVersionId", statusUpdateEvent.InstalledAppVersionId, "isAppStoreApplication", statusUpdateEvent.IsAppStoreApplication} + } + + validations := impl.cdWorkflowCommonService.GetTriggerValidateFuncs() + err := impl.pubSubClient.Subscribe(pubsub.ARGO_PIPELINE_STATUS_UPDATE_TOPIC, callback, loggerFunc, validations...) + if err != nil { + impl.logger.Errorw("error in subscribing to argo application status update topic", "err", err) + return err + } + return nil +} diff --git a/pkg/eventProcessor/in/CIPipelineEventProcessorService.go b/pkg/eventProcessor/in/CIPipelineEventProcessorService.go new file mode 100644 index 0000000000..bf5821ba76 --- /dev/null +++ b/pkg/eventProcessor/in/CIPipelineEventProcessorService.go @@ -0,0 +1,61 @@ +package in + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/pubsub-lib/model" + "github.com/devtron-labs/devtron/client/gitSensor" + "github.com/devtron-labs/devtron/pkg/git" + "go.uber.org/zap" +) + +type CIPipelineEventProcessorImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl + gitWebhookService git.GitWebhookService +} + +func NewCIPipelineEventProcessorImpl(logger *zap.SugaredLogger, pubSubClient *pubsub.PubSubClientServiceImpl, + gitWebhookService git.GitWebhookService) *CIPipelineEventProcessorImpl { + ciPipelineEventProcessorImpl := &CIPipelineEventProcessorImpl{ + logger: logger, + pubSubClient: pubSubClient, + gitWebhookService: gitWebhookService, + } + return ciPipelineEventProcessorImpl +} + +func (impl *CIPipelineEventProcessorImpl) SubscribeNewCIMaterialEvent() error { + callback := func(msg *model.PubSubMsg) { + //defer msg.Ack() + ciPipelineMaterial := gitSensor.CiPipelineMaterial{} + err := json.Unmarshal([]byte(msg.Data), &ciPipelineMaterial) + if err != nil { + impl.logger.Error("Error while unmarshalling json response", "error", err) + return + } + resp, err := impl.gitWebhookService.HandleGitWebhook(ciPipelineMaterial) + impl.logger.Debug(resp) + if err != nil { + impl.logger.Error("err", err) + return + } + } + + // add required logging here + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + ciPipelineMaterial := gitSensor.CiPipelineMaterial{} + err := json.Unmarshal([]byte(msg.Data), &ciPipelineMaterial) + if err != nil { + return "error while unmarshalling json response", []interface{}{"error", err} + } + return "got message about new ci material", []interface{}{"ciPipelineMaterialId", ciPipelineMaterial.Id, "gitMaterialId", ciPipelineMaterial.GitMaterialId, "type", ciPipelineMaterial.Type} + } + + err := impl.pubSubClient.Subscribe(pubsub.NEW_CI_MATERIAL_TOPIC, callback, loggerFunc) + if err != nil { + impl.logger.Error("err", err) + return err + } + return nil +} diff --git a/api/router/pubsub/ApplicationStatusHandler.go b/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go similarity index 76% rename from api/router/pubsub/ApplicationStatusHandler.go rename to pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go index c0ddd8d7c8..acdf437f53 100644 --- a/api/router/pubsub/ApplicationStatusHandler.go +++ b/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go @@ -1,104 +1,75 @@ -/* - * Copyright (c) 2020 Devtron Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package pubsub +package in import ( "context" "encoding/json" "errors" "fmt" + v1alpha12 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" "github.com/devtron-labs/common-lib/pubsub-lib/model" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/app" + appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" + "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode" + "github.com/devtron-labs/devtron/pkg/bean" + "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" bean2 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" + bean3 "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" + "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/workflow/cd" "github.com/devtron-labs/devtron/pkg/workflow/dag" - "k8s.io/utils/pointer" - "time" - - "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" - appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" - repository4 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" - - v1alpha12 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" - "github.com/devtron-labs/devtron/pkg/bean" - "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/go-pg/pg" "go.uber.org/zap" + "k8s.io/utils/pointer" + "time" ) -type ApplicationStatusHandler interface { - Subscribe() error - SubscribeDeleteStatus() error -} - -type ApplicationStatusHandlerImpl struct { +type DeployedApplicationEventProcessorImpl struct { logger *zap.SugaredLogger - pubsubClient *pubsub.PubSubClientServiceImpl + pubSubClient *pubsub.PubSubClientServiceImpl appService app.AppService - workflowDagExecutor dag.WorkflowDagExecutor + gitOpsConfigReadService config.GitOpsConfigReadService installedAppService FullMode.InstalledAppDBExtendedService - appStoreDeploymentService service.AppStoreDeploymentService - pipelineBuilder pipeline.PipelineBuilder - pipelineRepository pipelineConfig.PipelineRepository - installedAppRepository repository4.InstalledAppRepository + workflowDagExecutor dag.WorkflowDagExecutor cdWorkflowCommonService cd.CdWorkflowCommonService + pipelineBuilder pipeline.PipelineBuilder + appStoreDeploymentService service.AppStoreDeploymentService + + pipelineRepository pipelineConfig.PipelineRepository + installedAppRepository repository.InstalledAppRepository } -func NewApplicationStatusHandlerImpl(logger *zap.SugaredLogger, pubsubClient *pubsub.PubSubClientServiceImpl, appService app.AppService, - workflowDagExecutor dag.WorkflowDagExecutor, installedAppService FullMode.InstalledAppDBExtendedService, - appStoreDeploymentService service.AppStoreDeploymentService, pipelineBuilder pipeline.PipelineBuilder, - pipelineRepository pipelineConfig.PipelineRepository, installedAppRepository 
repository4.InstalledAppRepository, - cdWorkflowCommonService cd.CdWorkflowCommonService) *ApplicationStatusHandlerImpl { - appStatusUpdateHandlerImpl := &ApplicationStatusHandlerImpl{ +func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger, pubSubClient *pubsub.PubSubClientServiceImpl, + gitOpsConfigReadService config.GitOpsConfigReadService, + installedAppService FullMode.InstalledAppDBExtendedService, + workflowDagExecutor dag.WorkflowDagExecutor, + cdWorkflowCommonService cd.CdWorkflowCommonService, + pipelineBuilder pipeline.PipelineBuilder, + appStoreDeploymentService service.AppStoreDeploymentService, + pipelineRepository pipelineConfig.PipelineRepository, + installedAppRepository repository.InstalledAppRepository) *DeployedApplicationEventProcessorImpl { + deployedApplicationEventProcessorImpl := &DeployedApplicationEventProcessorImpl{ logger: logger, - pubsubClient: pubsubClient, - appService: appService, - workflowDagExecutor: workflowDagExecutor, + pubSubClient: pubSubClient, + gitOpsConfigReadService: gitOpsConfigReadService, installedAppService: installedAppService, - appStoreDeploymentService: appStoreDeploymentService, - pipelineBuilder: pipelineBuilder, - pipelineRepository: pipelineRepository, - installedAppRepository: installedAppRepository, + workflowDagExecutor: workflowDagExecutor, cdWorkflowCommonService: cdWorkflowCommonService, - } - err := appStatusUpdateHandlerImpl.Subscribe() - if err != nil { - // logger.Error("err", err) - return nil - } - err = appStatusUpdateHandlerImpl.SubscribeDeleteStatus() - if err != nil { - return nil - } - return appStatusUpdateHandlerImpl -} + pipelineBuilder: pipelineBuilder, + appStoreDeploymentService: appStoreDeploymentService, -type ApplicationDetail struct { - Application *v1alpha12.Application `json:"application"` - StatusTime time.Time `json:"statusTime"` + pipelineRepository: pipelineRepository, + installedAppRepository: installedAppRepository, + } + return deployedApplicationEventProcessorImpl } -func (impl *ApplicationStatusHandlerImpl) Subscribe() error { +func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppUpdate() error { callback := func(msg *model.PubSubMsg) { - applicationDetail := ApplicationDetail{} + applicationDetail := bean3.ApplicationDetail{} err := json.Unmarshal([]byte(msg.Data), &applicationDetail) if err != nil { impl.logger.Errorw("unmarshal error on app update status", "err", err) @@ -171,7 +142,7 @@ func (impl *ApplicationStatusHandlerImpl) Subscribe() error { } validations := impl.cdWorkflowCommonService.GetTriggerValidateFuncs() - err := impl.pubsubClient.Subscribe(pubsub.APPLICATION_STATUS_UPDATE_TOPIC, callback, loggerFunc, validations...) + err := impl.pubSubClient.Subscribe(pubsub.APPLICATION_STATUS_UPDATE_TOPIC, callback, loggerFunc, validations...) 
if err != nil { impl.logger.Error(err) return err @@ -179,10 +150,10 @@ func (impl *ApplicationStatusHandlerImpl) Subscribe() error { return nil } -func (impl *ApplicationStatusHandlerImpl) SubscribeDeleteStatus() error { +func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppDeleteStatus() error { callback := func(msg *model.PubSubMsg) { - applicationDetail := ApplicationDetail{} + applicationDetail := bean3.ApplicationDetail{} err := json.Unmarshal([]byte(msg.Data), &applicationDetail) if err != nil { impl.logger.Errorw("unmarshal error on app delete status", "err", err) @@ -202,7 +173,7 @@ func (impl *ApplicationStatusHandlerImpl) SubscribeDeleteStatus() error { // add required logging here var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { - applicationDetail := ApplicationDetail{} + applicationDetail := bean3.ApplicationDetail{} err := json.Unmarshal([]byte(msg.Data), &applicationDetail) if err != nil { return "unmarshal error on app delete status", []interface{}{"err", err} @@ -210,7 +181,7 @@ func (impl *ApplicationStatusHandlerImpl) SubscribeDeleteStatus() error { return "got message for application status delete", []interface{}{"appName", applicationDetail.Application.Name, "namespace", applicationDetail.Application.Namespace, "deleteTimestamp", applicationDetail.Application.DeletionTimestamp} } - err := impl.pubsubClient.Subscribe(pubsub.APPLICATION_STATUS_DELETE_TOPIC, callback, loggerFunc) + err := impl.pubSubClient.Subscribe(pubsub.APPLICATION_STATUS_DELETE_TOPIC, callback, loggerFunc) if err != nil { impl.logger.Errorw("error in subscribing to argo application status delete topic", "err", err) return err @@ -218,7 +189,7 @@ func (impl *ApplicationStatusHandlerImpl) SubscribeDeleteStatus() error { return nil } -func (impl *ApplicationStatusHandlerImpl) updateArgoAppDeleteStatus(app *v1alpha12.Application) error { +func (impl *DeployedApplicationEventProcessorImpl) updateArgoAppDeleteStatus(app *v1alpha12.Application) error { pipeline, err := impl.pipelineRepository.GetArgoPipelineByArgoAppName(app.ObjectMeta.Name) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching pipeline from Pipeline Repository", "err", err) diff --git a/pkg/eventProcessor/in/WorkflowEventProcessorService.go b/pkg/eventProcessor/in/WorkflowEventProcessorService.go index c17c3433e6..b997b5ca54 100644 --- a/pkg/eventProcessor/in/WorkflowEventProcessorService.go +++ b/pkg/eventProcessor/in/WorkflowEventProcessorService.go @@ -1,20 +1,26 @@ package in import ( + "context" "encoding/json" + "errors" "fmt" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/aws/aws-sdk-go-v2/service/ecr/types" pubsub "github.com/devtron-labs/common-lib/pubsub-lib" "github.com/devtron-labs/common-lib/pubsub-lib/model" bean2 "github.com/devtron-labs/devtron/api/bean" + client2 "github.com/devtron-labs/devtron/api/helm-app/service" client "github.com/devtron-labs/devtron/client/events" + "github.com/devtron-labs/devtron/internal/sql/models" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/app" bean4 "github.com/devtron-labs/devtron/pkg/auth/user/bean" "github.com/devtron-labs/devtron/pkg/deployment/deployedApp" bean6 "github.com/devtron-labs/devtron/pkg/deployment/deployedApp/bean" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps" + triggerAdapter 
"github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/adapter" bean5 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" bean7 "github.com/devtron-labs/devtron/pkg/eventProcessor/out/bean" @@ -28,10 +34,13 @@ import ( util2 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/argo" util "github.com/devtron-labs/devtron/util/event" + "github.com/go-pg/pg" "go.uber.org/zap" + "golang.org/x/exp/slices" "gopkg.in/go-playground/validator.v9" "k8s.io/utils/pointer" "strconv" + "sync" "time" ) @@ -52,6 +61,12 @@ type WorkflowEventProcessorImpl struct { validator *validator.Validate globalEnvVariables *util2.GlobalEnvVariables cdWorkflowCommonService cd.CdWorkflowCommonService + cdPipelineConfigService pipeline.CdPipelineConfigService + + devtronAppReleaseContextMap map[int]bean.DevtronAppReleaseContextType + devtronAppReleaseContextMapLock *sync.Mutex + appServiceConfig *app.AppServiceConfig + //repositories import to be removed pipelineRepository pipelineConfig.PipelineRepository ciArtifactRepository repository.CiArtifactRepository @@ -72,30 +87,39 @@ func NewWorkflowEventProcessorImpl(logger *zap.SugaredLogger, validator *validator.Validate, globalEnvVariables *util2.GlobalEnvVariables, cdWorkflowCommonService cd.CdWorkflowCommonService, + cdPipelineConfigService pipeline.CdPipelineConfigService, pipelineRepository pipelineConfig.PipelineRepository, ciArtifactRepository repository.CiArtifactRepository, cdWorkflowRepository pipelineConfig.CdWorkflowRepository) (*WorkflowEventProcessorImpl, error) { impl := &WorkflowEventProcessorImpl{ - logger: logger, - pubSubClient: pubSubClient, - cdWorkflowService: cdWorkflowService, - cdWorkflowRunnerService: cdWorkflowRunnerService, - argoUserService: argoUserService, - ciHandler: ciHandler, - cdHandler: cdHandler, - eventFactory: eventFactory, - eventClient: eventClient, - workflowDagExecutor: workflowDagExecutor, - cdTriggerService: cdTriggerService, - deployedAppService: deployedAppService, - webhookService: webhookService, - validator: validator, - globalEnvVariables: globalEnvVariables, - cdWorkflowCommonService: cdWorkflowCommonService, - pipelineRepository: pipelineRepository, - ciArtifactRepository: ciArtifactRepository, - cdWorkflowRepository: cdWorkflowRepository, + logger: logger, + pubSubClient: pubSubClient, + cdWorkflowService: cdWorkflowService, + cdWorkflowRunnerService: cdWorkflowRunnerService, + argoUserService: argoUserService, + ciHandler: ciHandler, + cdHandler: cdHandler, + eventFactory: eventFactory, + eventClient: eventClient, + workflowDagExecutor: workflowDagExecutor, + cdTriggerService: cdTriggerService, + deployedAppService: deployedAppService, + webhookService: webhookService, + validator: validator, + globalEnvVariables: globalEnvVariables, + cdWorkflowCommonService: cdWorkflowCommonService, + cdPipelineConfigService: cdPipelineConfigService, + devtronAppReleaseContextMap: make(map[int]bean.DevtronAppReleaseContextType), + devtronAppReleaseContextMapLock: &sync.Mutex{}, + pipelineRepository: pipelineRepository, + ciArtifactRepository: ciArtifactRepository, + cdWorkflowRepository: cdWorkflowRepository, + } + appServiceConfig, err := app.GetAppServiceConfig() + if err != nil { + return nil, err } + impl.appServiceConfig = appServiceConfig return impl, nil } @@ -595,3 +619,223 @@ func (impl *WorkflowEventProcessorImpl) BuildCIArtifactRequestForImageFromCR(ima } return request, nil } + +func (impl 
*WorkflowEventProcessorImpl) SubscribeDevtronAsyncHelmInstallRequest() error { + callback := func(msg *model.PubSubMsg) { + CDAsyncInstallNatsMessage, appIdentifier, err := impl.extractOverrideRequestFromCDAsyncInstallEvent(msg) + if err != nil { + impl.logger.Errorw("err on extracting override request, SubscribeDevtronAsyncHelmInstallRequest", "err", err) + return + } + toSkipProcess, err := impl.handleConcurrentOrInvalidRequest(CDAsyncInstallNatsMessage.ValuesOverrideRequest) + if err != nil { + impl.logger.Errorw("error, handleConcurrentOrInvalidRequest", "err", err, "req", CDAsyncInstallNatsMessage.ValuesOverrideRequest) + return + } + if toSkipProcess { + impl.logger.Debugw("skipping async helm install request", "req", CDAsyncInstallNatsMessage.ValuesOverrideRequest) + return + } + pipelineId := CDAsyncInstallNatsMessage.ValuesOverrideRequest.PipelineId + cdWfrId := CDAsyncInstallNatsMessage.ValuesOverrideRequest.WfrId + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(impl.appServiceConfig.DevtronChartInstallRequestTimeout)*time.Minute) + defer cancel() + impl.UpdateReleaseContextForPipeline(pipelineId, cdWfrId, cancel) + defer impl.cleanUpDevtronAppReleaseContextMap(pipelineId, cdWfrId) + err = impl.workflowDagExecutor.ProcessDevtronAsyncHelmInstallRequest(CDAsyncInstallNatsMessage, appIdentifier, ctx) + if err != nil { + impl.logger.Errorw("error, ProcessDevtronAsyncHelmInstallRequest", "err", err, "req", CDAsyncInstallNatsMessage) + return + } + return + } + + // add required logging here + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + CDAsyncInstallNatsMessage := &bean.AsyncCdDeployEvent{} + err := json.Unmarshal([]byte(msg.Data), CDAsyncInstallNatsMessage) + if err != nil { + return "error in unmarshalling CD async install request nats message", []interface{}{"err", err} + } + return "got message for devtron chart install", []interface{}{"appId", CDAsyncInstallNatsMessage.ValuesOverrideRequest.AppId, "pipelineId", CDAsyncInstallNatsMessage.ValuesOverrideRequest.PipelineId, "artifactId", CDAsyncInstallNatsMessage.ValuesOverrideRequest.CiArtifactId} + } + + err := impl.pubSubClient.Subscribe(pubsub.DEVTRON_CHART_INSTALL_TOPIC, callback, loggerFunc) + if err != nil { + impl.logger.Error(err) + return err + } + return nil +} + +func (impl *WorkflowEventProcessorImpl) handleConcurrentOrInvalidRequest(overrideRequest *bean2.ValuesOverrideRequest) (toSkipProcess bool, err error) { + pipelineId := overrideRequest.PipelineId + cdWfrId := overrideRequest.WfrId + cdWfr, err := impl.cdWorkflowRepository.FindWorkflowRunnerById(cdWfrId) + if err != nil { + impl.logger.Errorw("err on fetching cd workflow runner, handleConcurrentOrInvalidRequest", "err", err, "cdWfrId", cdWfrId) + return toSkipProcess, err + } + pipelineObj, err := impl.pipelineRepository.FindById(overrideRequest.PipelineId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("err on fetching pipeline, handleConcurrentOrInvalidRequest", "err", err, "pipelineId", pipelineId) + return toSkipProcess, err + } else if err != pg.ErrNoRows || pipelineObj == nil || pipelineObj.Id == 0 { + impl.logger.Warnw("invalid request received pipeline not active, handleConcurrentOrInvalidRequest", "err", err, "pipelineId", pipelineId) + toSkipProcess = true + return toSkipProcess, err + } + impl.devtronAppReleaseContextMapLock.Lock() + defer impl.devtronAppReleaseContextMapLock.Unlock() + if releaseContext, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { + if 
releaseContext.RunnerId == cdWfrId { + //request in process for same wfrId, skipping and doing nothing + //earlier we used to check if wfrStatus is in starting then only skip, removed that + toSkipProcess = true + return toSkipProcess, nil + } else { + //request in process but for other wfrId + // skip if the cdWfr.Status is already in a terminal state + skipCDWfrStatusList := append(pipelineConfig.WfrTerminalStatusList, pipelineConfig.WorkflowInProgress) + if slices.Contains(skipCDWfrStatusList, cdWfr.Status) { + impl.logger.Warnw("skipped deployment as the workflow runner status is already in terminal state, handleConcurrentOrInvalidRequest", "cdWfrId", cdWfrId, "status", cdWfr.Status) + toSkipProcess = true + return toSkipProcess, nil + } + isLatest, err := impl.cdWorkflowRunnerService.CheckIfWfrLatest(cdWfrId, pipelineId) + if err != nil { + impl.logger.Errorw("error, CheckIfWfrLatest", "err", err, "cdWfrId", cdWfrId) + return toSkipProcess, err + } + if !isLatest { + impl.logger.Warnw("skipped deployment as the workflow runner is not the latest one", "cdWfrId", cdWfrId) + err := impl.cdWorkflowCommonService.MarkCurrentDeploymentFailed(cdWfr, errors.New(pipelineConfig.NEW_DEPLOYMENT_INITIATED), overrideRequest.UserId) + if err != nil { + impl.logger.Errorw("error while updating current runner status to failed, handleConcurrentOrInvalidRequest", "cdWfr", cdWfrId, "err", err) + return toSkipProcess, err + } + toSkipProcess = true + return toSkipProcess, nil + } + } + } else { + //no request in process for pipeline, continue + } + + return toSkipProcess, nil +} + +func (impl *WorkflowEventProcessorImpl) isReleaseContextExistsForPipeline(pipelineId, cdWfrId int) bool { + impl.devtronAppReleaseContextMapLock.Lock() + defer impl.devtronAppReleaseContextMapLock.Unlock() + if releaseContext, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { + return releaseContext.RunnerId == cdWfrId + } + return false +} + +func (impl *WorkflowEventProcessorImpl) UpdateReleaseContextForPipeline(pipelineId, cdWfrId int, cancel context.CancelFunc) { + impl.devtronAppReleaseContextMapLock.Lock() + defer impl.devtronAppReleaseContextMapLock.Unlock() + if releaseContext, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { + //Abort previous running release + impl.logger.Infow("new deployment has been triggered with a running deployment in progress!", "aborting deployment for pipelineId", pipelineId) + releaseContext.CancelContext() + } + impl.devtronAppReleaseContextMap[pipelineId] = bean.DevtronAppReleaseContextType{ + CancelContext: cancel, + RunnerId: cdWfrId, + } +} + +func (impl *WorkflowEventProcessorImpl) cleanUpDevtronAppReleaseContextMap(pipelineId, wfrId int) { + if impl.isReleaseContextExistsForPipeline(pipelineId, wfrId) { + impl.devtronAppReleaseContextMapLock.Lock() + defer impl.devtronAppReleaseContextMapLock.Unlock() + if _, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { + delete(impl.devtronAppReleaseContextMap, pipelineId) + } + } +} + +func (impl *WorkflowEventProcessorImpl) extractOverrideRequestFromCDAsyncInstallEvent(msg *model.PubSubMsg) (*bean.AsyncCdDeployEvent, *client2.AppIdentifier, error) { + CDAsyncInstallNatsMessage := &bean.AsyncCdDeployEvent{} + err := json.Unmarshal([]byte(msg.Data), CDAsyncInstallNatsMessage) + if err != nil { + impl.logger.Errorw("error in unmarshalling CD async install request nats message", "err", err) + return nil, nil, err + } + pipeline, err := impl.pipelineRepository.FindById(CDAsyncInstallNatsMessage.ValuesOverrideRequest.PipelineId) 
+ if err != nil { + impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err) + return nil, nil, err + } + triggerAdapter.SetPipelineFieldsInOverrideRequest(CDAsyncInstallNatsMessage.ValuesOverrideRequest, pipeline) + if CDAsyncInstallNatsMessage.ValuesOverrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { + CDAsyncInstallNatsMessage.ValuesOverrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY + } + appIdentifier := &client2.AppIdentifier{ + ClusterId: pipeline.Environment.ClusterId, + Namespace: pipeline.Environment.Namespace, + ReleaseName: pipeline.DeploymentAppName, + } + return CDAsyncInstallNatsMessage, appIdentifier, nil +} + +func (impl *WorkflowEventProcessorImpl) SubscribeCDPipelineDeleteEvent() error { + callback := func(msg *model.PubSubMsg) { + cdPipelineDeleteEvent := &bean7.CdPipelineDeleteEvent{} + err := json.Unmarshal([]byte(msg.Data), cdPipelineDeleteEvent) + if err != nil { + impl.logger.Errorw("error while unmarshalling cdPipelineDeleteEvent object", "err", err, "msg", msg.Data) + return + } + pipeline, err := impl.pipelineRepository.FindByIdEvenIfInactive(cdPipelineDeleteEvent.PipelineId) + if err != nil { + impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err, "pipelineId", cdPipelineDeleteEvent.PipelineId) + return + } + impl.RemoveReleaseContextForPipeline(cdPipelineDeleteEvent) + //there is a possibility that when the pipeline was deleted, async request nats message was not consumed completely and could have led to dangling deployment app + //trying to delete deployment app once + err = impl.cdPipelineConfigService.DeleteHelmTypePipelineDeploymentApp(context.Background(), true, pipeline) + if err != nil { + impl.logger.Errorw("error, DeleteHelmTypePipelineDeploymentApp", "pipelineId", pipeline.Id) + } + } + // add required logging here + var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { + cdStageCompleteEvent := &bean.CdStageCompleteEvent{} + err := json.Unmarshal([]byte(msg.Data), cdStageCompleteEvent) + if err != nil { + return "error while unmarshalling cdPipelineDeleteEvent object", []interface{}{"err", err, "msg", msg.Data} + } + return "got message for cd pipeline deletion", []interface{}{"request", cdStageCompleteEvent} + } + + err := impl.pubSubClient.Subscribe(pubsub.CD_PIPELINE_DELETE_EVENT_TOPIC, callback, loggerFunc) + if err != nil { + impl.logger.Error("error", "err", err) + return err + } + return nil +} + +func (impl *WorkflowEventProcessorImpl) RemoveReleaseContextForPipeline(cdPipelineDeleteEvent *bean7.CdPipelineDeleteEvent) { + impl.devtronAppReleaseContextMapLock.Lock() + defer impl.devtronAppReleaseContextMapLock.Unlock() + if releaseContext, ok := impl.devtronAppReleaseContextMap[cdPipelineDeleteEvent.PipelineId]; ok { + //Abort previous running release + impl.logger.Infow("CD pipeline has been deleted with a running deployment in progress!", "aborting deployment for pipelineId", cdPipelineDeleteEvent.PipelineId) + cdWfr, err := impl.cdWorkflowRepository.FindWorkflowRunnerById(releaseContext.RunnerId) + if err != nil { + impl.logger.Errorw("err on fetching cd workflow runner, RemoveReleaseContextForPipeline", "err", err) + } + if err = impl.cdWorkflowCommonService.MarkCurrentDeploymentFailed(cdWfr, errors.New("CD pipeline has been deleted"), cdPipelineDeleteEvent.TriggeredBy); err != nil { + impl.logger.Errorw("error while updating current runner status to failed, RemoveReleaseContextForPipeline", "cdWfr", cdWfr.Id, "err", err) + } + 
releaseContext.CancelContext() + delete(impl.devtronAppReleaseContextMap, cdPipelineDeleteEvent.PipelineId) + } + return +} diff --git a/pkg/eventProcessor/in/wire_eventProcessorIn.go b/pkg/eventProcessor/in/wire_eventProcessorIn.go index 1d0b2083ec..f609e423fd 100644 --- a/pkg/eventProcessor/in/wire_eventProcessorIn.go +++ b/pkg/eventProcessor/in/wire_eventProcessorIn.go @@ -3,5 +3,9 @@ package in import "github.com/google/wire" var EventProcessorInWireSet = wire.NewSet( + NewCIPipelineEventProcessorImpl, NewWorkflowEventProcessorImpl, + NewDeployedApplicationEventProcessorImpl, + NewCDPipelineEventProcessorImpl, + NewAppStoreAppsEventProcessorImpl, ) diff --git a/pkg/eventProcessor/out/AppStoreAppsEventPublishService.go b/pkg/eventProcessor/out/AppStoreAppsEventPublishService.go new file mode 100644 index 0000000000..39a91af831 --- /dev/null +++ b/pkg/eventProcessor/out/AppStoreAppsEventPublishService.go @@ -0,0 +1,48 @@ +package out + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" + "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" + "go.uber.org/zap" +) + +type AppStoreAppsEventPublishService interface { + PublishBulkDeployEvent(installAppVersions []*appStoreBean.InstallAppVersionDTO) map[int]error +} + +type AppStoreAppsEventPublishServiceImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl +} + +func NewAppStoreAppsEventPublishServiceImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl) *AppStoreAppsEventPublishServiceImpl { + return &AppStoreAppsEventPublishServiceImpl{ + logger: logger, + pubSubClient: pubSubClient, + } +} + +// PublishBulkDeployEvent takes installAppVersions and publishes a deploy event for each. 
Response is map of installedAppVersionId along with error in publishing if any +func (impl *AppStoreAppsEventPublishServiceImpl) PublishBulkDeployEvent(installAppVersions []*appStoreBean.InstallAppVersionDTO) map[int]error { + responseMap := make(map[int]error, len(installAppVersions)) + for _, version := range installAppVersions { + var publishError error + payload := &bean.BulkDeployPayload{InstalledAppVersionId: version.InstalledAppVersionId, InstalledAppVersionHistoryId: version.InstalledAppVersionHistoryId} + data, err := json.Marshal(payload) + if err != nil { + impl.logger.Errorw("error in marshaling installed app version bulk deploy event payload", "err", err, "payload", payload) + publishError = err + } else { + err = impl.pubSubClient.Publish(pubsub.BULK_APPSTORE_DEPLOY_TOPIC, string(data)) + if err != nil { + impl.logger.Errorw("err while publishing msg for app-store bulk deploy", "msg", data, "err", err) + publishError = err + } + } + responseMap[version.InstalledAppVersionId] = publishError + } + return responseMap +} diff --git a/pkg/eventProcessor/out/CDPipelineEventPublishService.go b/pkg/eventProcessor/out/CDPipelineEventPublishService.go new file mode 100644 index 0000000000..b969eebc6e --- /dev/null +++ b/pkg/eventProcessor/out/CDPipelineEventPublishService.go @@ -0,0 +1,78 @@ +package out + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + bean2 "github.com/devtron-labs/devtron/api/bean" + "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" + "go.uber.org/zap" +) + +type CDPipelineEventPublishService interface { + PublishBulkTriggerTopicEvent(pipelineId, appId, + artifactId int, userId int32) error + + PublishArgoTypePipelineSyncEvent(pipelineId, installedAppVersionId int, + userId int32, isAppStoreApplication bool) error +} + +type CDPipelineEventPublishServiceImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl +} + +func NewCDPipelineEventPublishServiceImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl) *CDPipelineEventPublishServiceImpl { + return &CDPipelineEventPublishServiceImpl{ + logger: logger, + pubSubClient: pubSubClient, + } +} + +func (impl *CDPipelineEventPublishServiceImpl) PublishBulkTriggerTopicEvent(pipelineId, appId, + artifactId int, userId int32) error { + event := &bean.BulkCDDeployEvent{ + ValuesOverrideRequest: &bean2.ValuesOverrideRequest{ + PipelineId: pipelineId, + AppId: appId, + CiArtifactId: artifactId, + UserId: userId, + CdWorkflowType: bean2.CD_WORKFLOW_TYPE_DEPLOY, + }, + UserId: userId, + } + payload, err := json.Marshal(event) + if err != nil { + impl.logger.Errorw("failed to marshal cd bulk deploy event request", "request", event, "err", err) + return err + } + err = impl.pubSubClient.Publish(pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, string(payload)) + if err != nil { + impl.logger.Errorw("failed to publish trigger request event", "topic", pubsub.CD_BULK_DEPLOY_TRIGGER_TOPIC, + "err", err, "request", event) + return err + } + return nil +} + +func (impl *CDPipelineEventPublishServiceImpl) PublishArgoTypePipelineSyncEvent(pipelineId, installedAppVersionId int, + userId int32, isAppStoreApplication bool) error { + statusUpdateEvent := bean.ArgoPipelineStatusSyncEvent{ + PipelineId: pipelineId, + InstalledAppVersionId: installedAppVersionId, + UserId: userId, + IsAppStoreApplication: isAppStoreApplication, + } + data, err := json.Marshal(statusUpdateEvent) + if err != nil { + impl.logger.Errorw("error while writing cd pipeline delete 
event to nats", "err", err, "req", statusUpdateEvent) + return err + } else { + err = impl.pubSubClient.Publish(pubsub.ARGO_PIPELINE_STATUS_UPDATE_TOPIC, string(data)) + if err != nil { + impl.logger.Errorw("error, PublishArgoTypePipelineSyncEvent", "topic", pubsub.ARGO_PIPELINE_STATUS_UPDATE_TOPIC, "error", err, "data", data) + return err + } + } + return nil +} diff --git a/pkg/eventProcessor/out/CIPipelineEventPublishService.go b/pkg/eventProcessor/out/CIPipelineEventPublishService.go new file mode 100644 index 0000000000..661162d228 --- /dev/null +++ b/pkg/eventProcessor/out/CIPipelineEventPublishService.go @@ -0,0 +1,44 @@ +package out + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out/bean" + "go.uber.org/zap" +) + +type CIPipelineEventPublishService interface { + PublishGitWebhookEvent(gitHostId int, eventType, requestJSON string) error +} + +type CIPipelineEventPublishServiceImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl +} + +func NewCIPipelineEventPublishServiceImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl) *CIPipelineEventPublishServiceImpl { + return &CIPipelineEventPublishServiceImpl{ + logger: logger, + pubSubClient: pubSubClient, + } +} + +func (impl *CIPipelineEventPublishServiceImpl) PublishGitWebhookEvent(gitHostId int, eventType, requestJSON string) error { + event := &bean.CIPipelineGitWebhookEvent{ + GitHostId: gitHostId, + EventType: eventType, + RequestPayloadJson: requestJSON, + } + body, err := json.Marshal(event) + if err != nil { + impl.logger.Errorw("error in marshaling git webhook event", "err", err, "event", event) + return err + } + err = impl.pubSubClient.Publish(pubsub.WEBHOOK_EVENT_TOPIC, string(body)) + if err != nil { + impl.logger.Errorw("error in publishing git webhook event", "err", err, "eventBody", body) + return err + } + return nil +} diff --git a/pkg/eventProcessor/out/PipelineConfigEventPublishService.go b/pkg/eventProcessor/out/PipelineConfigEventPublishService.go new file mode 100644 index 0000000000..b52ad74fbb --- /dev/null +++ b/pkg/eventProcessor/out/PipelineConfigEventPublishService.go @@ -0,0 +1,46 @@ +package out + +import ( + "encoding/json" + pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out/bean" + "go.uber.org/zap" +) + +type PipelineConfigEventPublishService interface { + PublishCDPipelineDelete(pipelineId int, triggeredBy int32) error +} + +type PipelineConfigEventPublishServiceImpl struct { + logger *zap.SugaredLogger + pubSubClient *pubsub.PubSubClientServiceImpl +} + +func NewPipelineConfigEventPublishServiceImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl) *PipelineConfigEventPublishServiceImpl { + return &PipelineConfigEventPublishServiceImpl{ + logger: logger, + pubSubClient: pubSubClient, + } + +} + +func (impl *PipelineConfigEventPublishServiceImpl) PublishCDPipelineDelete(pipelineId int, triggeredBy int32) error { + impl.logger.Infow("cd pipeline delete event handle", "pipelineId", pipelineId, "triggeredBy", triggeredBy) + req := &bean.CdPipelineDeleteEvent{ + PipelineId: pipelineId, + TriggeredBy: triggeredBy, + } + data, err := json.Marshal(req) + if err != nil { + impl.logger.Errorw("error while writing cd pipeline delete event to nats", "err", err, "req", req) + return err + } else { + err = impl.pubSubClient.Publish(pubsub.CD_PIPELINE_DELETE_EVENT_TOPIC, 
string(data)) + if err != nil { + impl.logger.Errorw("Error while publishing request", "topic", pubsub.CD_PIPELINE_DELETE_EVENT_TOPIC, "error", err) + return err + } + } + return nil +} diff --git a/pkg/eventProcessor/out/bean/bean.go b/pkg/eventProcessor/out/bean/bean.go index cb36a44bb1..9824c1515d 100644 --- a/pkg/eventProcessor/out/bean/bean.go +++ b/pkg/eventProcessor/out/bean/bean.go @@ -1,6 +1,8 @@ package bean -import bean4 "github.com/devtron-labs/devtron/pkg/deployment/deployedApp/bean" +import ( + bean4 "github.com/devtron-labs/devtron/pkg/deployment/deployedApp/bean" +) type BulkTriggerRequest struct { CiArtifactId int `sql:"ci_artifact_id"` @@ -21,3 +23,14 @@ type DeploymentGroupAppWithEnv struct { UserId int32 `json:"userId"` RequestType bean4.RequestType `json:"requestType" validate:"oneof=START STOP"` } + +type CdPipelineDeleteEvent struct { + PipelineId int `json:"pipelineId"` + TriggeredBy int32 `json:"triggeredBy"` +} + +type CIPipelineGitWebhookEvent struct { + GitHostId int `json:"gitHostId"` + EventType string `json:"eventType"` + RequestPayloadJson string `json:"requestPayloadJson"` +} diff --git a/pkg/eventProcessor/out/wire_eventProcessorOut.go b/pkg/eventProcessor/out/wire_eventProcessorOut.go index 2ef126e280..78cb6ce842 100644 --- a/pkg/eventProcessor/out/wire_eventProcessorOut.go +++ b/pkg/eventProcessor/out/wire_eventProcessorOut.go @@ -5,4 +5,16 @@ import "github.com/google/wire" var EventProcessorOutWireSet = wire.NewSet( NewWorkflowEventPublishServiceImpl, wire.Bind(new(WorkflowEventPublishService), new(*WorkflowEventPublishServiceImpl)), + + NewPipelineConfigEventPublishServiceImpl, + wire.Bind(new(PipelineConfigEventPublishService), new(*PipelineConfigEventPublishServiceImpl)), + + NewCDPipelineEventPublishServiceImpl, + wire.Bind(new(CDPipelineEventPublishService), new(*CDPipelineEventPublishServiceImpl)), + + NewAppStoreAppsEventPublishServiceImpl, + wire.Bind(new(AppStoreAppsEventPublishService), new(*AppStoreAppsEventPublishServiceImpl)), + + NewCIPipelineEventPublishServiceImpl, + wire.Bind(new(CIPipelineEventPublishService), new(*CIPipelineEventPublishServiceImpl)), ) diff --git a/pkg/generateManifest/DeployementTemplateService.go b/pkg/generateManifest/DeployementTemplateService.go index d5988425de..767cf57418 100644 --- a/pkg/generateManifest/DeployementTemplateService.go +++ b/pkg/generateManifest/DeployementTemplateService.go @@ -13,7 +13,6 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/app" "github.com/devtron-labs/devtron/pkg/chart" - chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" repository3 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" "github.com/devtron-labs/devtron/pkg/pipeline" @@ -76,10 +75,8 @@ type DeploymentTemplateServiceImpl struct { Logger *zap.SugaredLogger chartService chart.ChartService appListingService app.AppListingService - appListingRepository repository.AppListingRepository deploymentTemplateRepository repository.DeploymentTemplateRepository helmAppService client.HelmAppService - chartRepository chartRepoRepository.ChartRepository chartTemplateServiceImpl util.ChartTemplateService K8sUtil *k8s.K8sServiceImpl helmAppClient gRPC.HelmAppClient @@ -93,10 +90,8 @@ type DeploymentTemplateServiceImpl struct { func NewDeploymentTemplateServiceImpl(Logger *zap.SugaredLogger, chartService chart.ChartService, appListingService app.AppListingService, - 
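For context, a minimal usage sketch (not part of the patch) of the publishers added above in pkg/eventProcessor/out: a caller that has PipelineConfigEventPublishService injected, normally through the wire set in wire_eventProcessorOut.go, only needs the interface method. The helper name and its log-and-continue error handling are assumptions; the interface and method signature come from the new files above.

package example

import (
	"github.com/devtron-labs/devtron/pkg/eventProcessor/out"
	"go.uber.org/zap"
)

// notifyPipelineDeleted is a hypothetical caller-side helper. It mirrors how
// DeleteCdPipeline (further down in this patch) fires the delete event and
// treats a publish failure as non-fatal for the delete flow itself.
func notifyPipelineDeleted(logger *zap.SugaredLogger, publisher out.PipelineConfigEventPublishService, pipelineId int, userId int32) {
	err := publisher.PublishCDPipelineDelete(pipelineId, userId)
	if err != nil {
		logger.Errorw("failed to publish cd pipeline delete event", "pipelineId", pipelineId, "err", err)
	}
}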
appListingRepository repository.AppListingRepository, deploymentTemplateRepository repository.DeploymentTemplateRepository, helmAppService client.HelmAppService, - chartRepository chartRepoRepository.ChartRepository, chartTemplateServiceImpl util.ChartTemplateService, helmAppClient gRPC.HelmAppClient, K8sUtil *k8s.K8sServiceImpl, @@ -110,10 +105,8 @@ func NewDeploymentTemplateServiceImpl(Logger *zap.SugaredLogger, chartService ch Logger: Logger, chartService: chartService, appListingService: appListingService, - appListingRepository: appListingRepository, deploymentTemplateRepository: deploymentTemplateRepository, helmAppService: helmAppService, - chartRepository: chartRepository, chartTemplateServiceImpl: chartTemplateServiceImpl, K8sUtil: K8sUtil, helmAppClient: helmAppClient, diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index 6579224ff6..714fbf7096 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -47,6 +47,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out" "github.com/devtron-labs/devtron/pkg/imageDigestPolicy" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/pipeline/history" @@ -116,44 +117,46 @@ type CdPipelineConfigService interface { //GetEnvironmentListForAutocompleteFilter : lists environment for given configuration GetEnvironmentListForAutocompleteFilter(envName string, clusterIds []int, offset int, size int, token string, checkAuthBatch func(token string, appObject []string, envObject []string) (map[string]bool, map[string]bool), ctx context.Context) (*cluster.ResourceGroupingResponse, error) RegisterInACD(ctx context.Context, chartGitAttr *commonBean.ChartGitAttribute, userId int32) error + //DeleteHelmTypePipelineDeploymentApp : Deletes helm release for a pipeline with force flag + DeleteHelmTypePipelineDeploymentApp(ctx context.Context, forceDelete bool, pipeline *pipelineConfig.Pipeline) error } type CdPipelineConfigServiceImpl struct { - logger *zap.SugaredLogger - pipelineRepository pipelineConfig.PipelineRepository - environmentRepository repository2.EnvironmentRepository - pipelineConfigRepository chartConfig.PipelineConfigRepository - appWorkflowRepository appWorkflow.AppWorkflowRepository - pipelineStageService PipelineStageService - appRepo app2.AppRepository - appService app.AppService - deploymentGroupRepository repository.DeploymentGroupRepository - ciCdPipelineOrchestrator CiCdPipelineOrchestrator - appStatusRepository appStatus.AppStatusRepository - ciPipelineRepository pipelineConfig.CiPipelineRepository - prePostCdScriptHistoryService history.PrePostCdScriptHistoryService - clusterRepository repository2.ClusterRepository - helmAppService client.HelmAppService - enforcerUtil rbac.EnforcerUtil - pipelineStrategyHistoryService history.PipelineStrategyHistoryService - chartRepository chartRepoRepository.ChartRepository - resourceGroupService resourceGroup2.ResourceGroupService - propertiesConfigService PropertiesConfigService - deploymentTemplateHistoryService history.DeploymentTemplateHistoryService - scopedVariableManager variables.ScopedVariableManager - deploymentConfig *DeploymentServiceTypeConfig - application application.ServiceClient - customTagService 
CustomTagService - pipelineConfigListenerService PipelineConfigListenerService - devtronAppCMCSService DevtronAppCMCSService - ciPipelineConfigService CiPipelineConfigService - buildPipelineSwitchService BuildPipelineSwitchService - argoClientWrapperService argocdServer.ArgoClientWrapperService - deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService - gitOpsConfigReadService config.GitOpsConfigReadService - gitOperationService git.GitOperationService - chartService chart.ChartService - imageDigestPolicyService imageDigestPolicy.ImageDigestPolicyService + logger *zap.SugaredLogger + pipelineRepository pipelineConfig.PipelineRepository + environmentRepository repository2.EnvironmentRepository + pipelineConfigRepository chartConfig.PipelineConfigRepository + appWorkflowRepository appWorkflow.AppWorkflowRepository + pipelineStageService PipelineStageService + appRepo app2.AppRepository + appService app.AppService + deploymentGroupRepository repository.DeploymentGroupRepository + ciCdPipelineOrchestrator CiCdPipelineOrchestrator + appStatusRepository appStatus.AppStatusRepository + ciPipelineRepository pipelineConfig.CiPipelineRepository + prePostCdScriptHistoryService history.PrePostCdScriptHistoryService + clusterRepository repository2.ClusterRepository + helmAppService client.HelmAppService + enforcerUtil rbac.EnforcerUtil + pipelineStrategyHistoryService history.PipelineStrategyHistoryService + chartRepository chartRepoRepository.ChartRepository + resourceGroupService resourceGroup2.ResourceGroupService + propertiesConfigService PropertiesConfigService + deploymentTemplateHistoryService history.DeploymentTemplateHistoryService + scopedVariableManager variables.ScopedVariableManager + deploymentConfig *DeploymentServiceTypeConfig + application application.ServiceClient + customTagService CustomTagService + devtronAppCMCSService DevtronAppCMCSService + ciPipelineConfigService CiPipelineConfigService + buildPipelineSwitchService BuildPipelineSwitchService + argoClientWrapperService argocdServer.ArgoClientWrapperService + deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService + gitOpsConfigReadService config.GitOpsConfigReadService + gitOperationService git.GitOperationService + chartService chart.ChartService + imageDigestPolicyService imageDigestPolicy.ImageDigestPolicyService + pipelineConfigEventPublishService out.PipelineConfigEventPublishService } func NewCdPipelineConfigServiceImpl(logger *zap.SugaredLogger, pipelineRepository pipelineConfig.PipelineRepository, @@ -169,50 +172,51 @@ func NewCdPipelineConfigServiceImpl(logger *zap.SugaredLogger, pipelineRepositor deploymentTemplateHistoryService history.DeploymentTemplateHistoryService, scopedVariableManager variables.ScopedVariableManager, deploymentConfig *DeploymentServiceTypeConfig, application application.ServiceClient, customTagService CustomTagService, - pipelineConfigListenerService PipelineConfigListenerService, devtronAppCMCSService DevtronAppCMCSService, + devtronAppCMCSService DevtronAppCMCSService, ciPipelineConfigService CiPipelineConfigService, buildPipelineSwitchService BuildPipelineSwitchService, argoClientWrapperService argocdServer.ArgoClientWrapperService, deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, gitOpsConfigReadService config.GitOpsConfigReadService, gitOperationService git.GitOperationService, chartService chart.ChartService, - imageDigestPolicyService imageDigestPolicy.ImageDigestPolicyService) *CdPipelineConfigServiceImpl { + imageDigestPolicyService 
imageDigestPolicy.ImageDigestPolicyService, + pipelineConfigEventPublishService out.PipelineConfigEventPublishService) *CdPipelineConfigServiceImpl { return &CdPipelineConfigServiceImpl{ - logger: logger, - pipelineRepository: pipelineRepository, - environmentRepository: environmentRepository, - pipelineConfigRepository: pipelineConfigRepository, - appWorkflowRepository: appWorkflowRepository, - pipelineStageService: pipelineStageService, - appRepo: appRepo, - appService: appService, - deploymentGroupRepository: deploymentGroupRepository, - ciCdPipelineOrchestrator: ciCdPipelineOrchestrator, - appStatusRepository: appStatusRepository, - ciPipelineRepository: ciPipelineRepository, - prePostCdScriptHistoryService: prePostCdScriptHistoryService, - clusterRepository: clusterRepository, - helmAppService: helmAppService, - enforcerUtil: enforcerUtil, - pipelineStrategyHistoryService: pipelineStrategyHistoryService, - chartRepository: chartRepository, - resourceGroupService: resourceGroupService, - propertiesConfigService: propertiesConfigService, - deploymentTemplateHistoryService: deploymentTemplateHistoryService, - scopedVariableManager: scopedVariableManager, - deploymentConfig: deploymentConfig, - application: application, - pipelineConfigListenerService: pipelineConfigListenerService, - chartService: chartService, - devtronAppCMCSService: devtronAppCMCSService, - customTagService: customTagService, - ciPipelineConfigService: ciPipelineConfigService, - buildPipelineSwitchService: buildPipelineSwitchService, - argoClientWrapperService: argoClientWrapperService, - deployedAppMetricsService: deployedAppMetricsService, - gitOpsConfigReadService: gitOpsConfigReadService, - gitOperationService: gitOperationService, - imageDigestPolicyService: imageDigestPolicyService, + logger: logger, + pipelineRepository: pipelineRepository, + environmentRepository: environmentRepository, + pipelineConfigRepository: pipelineConfigRepository, + appWorkflowRepository: appWorkflowRepository, + pipelineStageService: pipelineStageService, + appRepo: appRepo, + appService: appService, + deploymentGroupRepository: deploymentGroupRepository, + ciCdPipelineOrchestrator: ciCdPipelineOrchestrator, + appStatusRepository: appStatusRepository, + ciPipelineRepository: ciPipelineRepository, + prePostCdScriptHistoryService: prePostCdScriptHistoryService, + clusterRepository: clusterRepository, + helmAppService: helmAppService, + enforcerUtil: enforcerUtil, + pipelineStrategyHistoryService: pipelineStrategyHistoryService, + chartRepository: chartRepository, + resourceGroupService: resourceGroupService, + propertiesConfigService: propertiesConfigService, + deploymentTemplateHistoryService: deploymentTemplateHistoryService, + scopedVariableManager: scopedVariableManager, + deploymentConfig: deploymentConfig, + application: application, + chartService: chartService, + devtronAppCMCSService: devtronAppCMCSService, + customTagService: customTagService, + ciPipelineConfigService: ciPipelineConfigService, + buildPipelineSwitchService: buildPipelineSwitchService, + argoClientWrapperService: argoClientWrapperService, + deployedAppMetricsService: deployedAppMetricsService, + gitOpsConfigReadService: gitOpsConfigReadService, + gitOperationService: gitOperationService, + imageDigestPolicyService: imageDigestPolicyService, + pipelineConfigEventPublishService: pipelineConfigEventPublishService, } } @@ -854,22 +858,11 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipeline(pipeline *pipelineConf impl.logger.Infow("app deleted from argocd", 
"id", pipeline.Id, "pipelineName", pipeline.Name, "app", deploymentAppName) } } else if util.IsHelmApp(pipeline.DeploymentAppType) { - appIdentifier := &client.AppIdentifier{ - ClusterId: pipeline.Environment.ClusterId, - ReleaseName: deploymentAppName, - Namespace: pipeline.Environment.Namespace, - } - deleteResourceResponse, err := impl.helmAppService.DeleteApplication(ctx, appIdentifier) - if forceDelete || errors3.As(err, &models2.NamespaceNotExistError{}) { - impl.logger.Warnw("error while deletion of helm application, ignore error and delete from db since force delete req", "error", err, "pipelineId", pipeline.Id) - } else { - if err != nil { - impl.logger.Errorw("error in deleting helm application", "error", err, "appIdentifier", appIdentifier) - return deleteResponse, err - } - if deleteResourceResponse == nil || !deleteResourceResponse.GetSuccess() { - return deleteResponse, errors2.New("delete application response unsuccessful") - } + err = impl.DeleteHelmTypePipelineDeploymentApp(ctx, forceDelete, pipeline) + if err != nil { + impl.logger.Errorw("error, DeleteHelmTypePipelineDeploymentApp", "err", err, "pipelineId", pipeline.Id) + return deleteResponse, err + } } } @@ -879,10 +872,32 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipeline(pipeline *pipelineConf return deleteResponse, err } deleteResponse.DeleteInitiated = true - impl.pipelineConfigListenerService.HandleCdPipelineDelete(pipeline.Id, userId) + impl.pipelineConfigEventPublishService.PublishCDPipelineDelete(pipeline.Id, userId) return deleteResponse, nil } +func (impl *CdPipelineConfigServiceImpl) DeleteHelmTypePipelineDeploymentApp(ctx context.Context, forceDelete bool, pipeline *pipelineConfig.Pipeline) error { + deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) + appIdentifier := &client.AppIdentifier{ + ClusterId: pipeline.Environment.ClusterId, + ReleaseName: deploymentAppName, + Namespace: pipeline.Environment.Namespace, + } + deleteResourceResponse, err := impl.helmAppService.DeleteApplication(ctx, appIdentifier) + if forceDelete || errors3.As(err, &models2.NamespaceNotExistError{}) { + impl.logger.Warnw("error while deletion of helm application, ignore error and delete from db since force delete req", "error", err, "pipelineId", pipeline.Id) + } else { + if err != nil { + impl.logger.Errorw("error in deleting helm application", "error", err, "appIdentifier", appIdentifier) + return err + } + if deleteResourceResponse == nil || !deleteResourceResponse.GetSuccess() { + return errors2.New("delete application response unsuccessful") + } + } + return nil +} + func (impl *CdPipelineConfigServiceImpl) DeleteACDAppCdPipelineWithNonCascade(pipeline *pipelineConfig.Pipeline, ctx context.Context, forceDelete bool, userId int32) error { if forceDelete { _, err := impl.DeleteCdPipeline(pipeline, ctx, bean.FORCE_DELETE, false, userId) diff --git a/pkg/pipeline/PipelineConfigServiceListener.go b/pkg/pipeline/PipelineConfigServiceListener.go deleted file mode 100644 index 213575d15d..0000000000 --- a/pkg/pipeline/PipelineConfigServiceListener.go +++ /dev/null @@ -1,38 +0,0 @@ -package pipeline - -import ( - "go.uber.org/zap" - "reflect" -) - -type PipelineConfigListenerService interface { - RegisterPipelineDeleteListener(listener PipelineConfigListener) - HandleCdPipelineDelete(pipelineId int, triggeredBy int32) -} - -type PipelineConfigListenerServiceImpl struct { - logger *zap.SugaredLogger - deleteCdPipelineListeners []PipelineConfigListener -} - -type PipelineConfigListener 
interface { - OnDeleteCdPipelineEvent(pipelineId int, triggeredBy int32) -} - -func NewPipelineConfigListenerServiceImpl(logger *zap.SugaredLogger) *PipelineConfigListenerServiceImpl { - return &PipelineConfigListenerServiceImpl{ - logger: logger, - } -} - -func (impl *PipelineConfigListenerServiceImpl) RegisterPipelineDeleteListener(listener PipelineConfigListener) { - impl.logger.Infof("registering listener %s, service: PipelineConfigListenerService", reflect.TypeOf(listener)) - impl.deleteCdPipelineListeners = append(impl.deleteCdPipelineListeners, listener) -} - -func (impl *PipelineConfigListenerServiceImpl) HandleCdPipelineDelete(pipelineId int, triggeredBy int32) { - impl.logger.Infow("cd pipeline delete process", "pipelineId", pipelineId, "triggeredBy", triggeredBy) - for _, deleteCdPipelineListener := range impl.deleteCdPipelineListeners { - deleteCdPipelineListener.OnDeleteCdPipelineEvent(pipelineId, triggeredBy) - } -} diff --git a/pkg/workflow/cd/CdWorkflowRunnerService.go b/pkg/workflow/cd/CdWorkflowRunnerService.go index 68a64cbd6a..4efd12b5f4 100644 --- a/pkg/workflow/cd/CdWorkflowRunnerService.go +++ b/pkg/workflow/cd/CdWorkflowRunnerService.go @@ -4,11 +4,13 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/workflow/cd/adapter" "github.com/devtron-labs/devtron/pkg/workflow/cd/bean" + "github.com/go-pg/pg" "go.uber.org/zap" ) type CdWorkflowRunnerService interface { FindWorkflowRunnerById(wfrId int) (*bean.CdWorkflowRunnerDto, error) + CheckIfWfrLatest(wfrId, pipelineId int) (isLatest bool, err error) } type CdWorkflowRunnerServiceImpl struct { @@ -33,3 +35,12 @@ func (impl *CdWorkflowRunnerServiceImpl) FindWorkflowRunnerById(wfrId int) (*bea return adapter.ConvertCdWorkflowRunnerDbObjToDto(cdWfr), nil } + +func (impl *CdWorkflowRunnerServiceImpl) CheckIfWfrLatest(wfrId, pipelineId int) (isLatest bool, err error) { + isLatest, err = impl.cdWorkflowRepository.IsLatestCDWfr(wfrId, pipelineId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("err in checking latest cd workflow runner", "err", err) + return false, err + } + return isLatest, nil +} diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index e08759c094..f59a142003 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -30,6 +30,7 @@ import ( "github.com/devtron-labs/devtron/pkg/build/artifacts" "github.com/devtron-labs/devtron/pkg/deployment/manifest" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps" + triggerAdapter "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/adapter" bean5 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" bean7 "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" "github.com/devtron-labs/devtron/pkg/pipeline" @@ -43,16 +44,6 @@ import ( "time" "github.com/devtron-labs/common-lib/pubsub-lib/model" - pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" - repository4 "github.com/devtron-labs/devtron/pkg/pipeline/repository" - "github.com/devtron-labs/devtron/pkg/pipeline/types" - serverBean "github.com/devtron-labs/devtron/pkg/server/bean" - util4 "github.com/devtron-labs/devtron/util" - "github.com/pkg/errors" - "go.opentelemetry.io/otel" - "k8s.io/utils/strings/slices" - - pubsub "github.com/devtron-labs/common-lib/pubsub-lib" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/models" 
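With PipelineConfigServiceListener deleted above, pipeline-delete notifications now travel over NATS instead of an in-process listener. The consumer side lives in the new CDPipelineEventProcessor (wired earlier in pkg/eventProcessor/in and not shown in full here); the sketch below only approximates such a subscriber, using the Subscribe/LoggerFunc API visible in the code removed from WorkflowDagExecutor further down. The function name and callback body are assumptions.

package example

import (
	"encoding/json"

	pubsub "github.com/devtron-labs/common-lib/pubsub-lib"
	"github.com/devtron-labs/common-lib/pubsub-lib/model"
	"github.com/devtron-labs/devtron/pkg/eventProcessor/out/bean"
	"go.uber.org/zap"
)

// subscribeCdPipelineDelete registers a handler for CD-PIPELINE-DELETE-EVENT and
// forwards the decoded event to the supplied callback, taking over the role of
// the old OnDeleteCdPipelineEvent listener hook.
func subscribeCdPipelineDelete(logger *zap.SugaredLogger, client *pubsub.PubSubClientServiceImpl, onDelete func(pipelineId int, triggeredBy int32)) error {
	callback := func(msg *model.PubSubMsg) {
		event := &bean.CdPipelineDeleteEvent{}
		if err := json.Unmarshal([]byte(msg.Data), event); err != nil {
			logger.Errorw("error in unmarshalling cd pipeline delete event", "err", err, "data", msg.Data)
			return
		}
		onDelete(event.PipelineId, event.TriggeredBy)
	}
	var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) {
		return "got cd pipeline delete event", []interface{}{"data", msg.Data}
	}
	return client.Subscribe(pubsub.CD_PIPELINE_DELETE_EVENT_TOPIC, callback, loggerFunc)
}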
"github.com/devtron-labs/devtron/internal/sql/repository" @@ -61,8 +52,15 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/app" + bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" + repository4 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/pipeline/types" + serverBean "github.com/devtron-labs/devtron/pkg/server/bean" + util4 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/rbac" "github.com/go-pg/pg" + "github.com/pkg/errors" + "go.opentelemetry.io/otel" "go.uber.org/zap" ) @@ -76,8 +74,10 @@ type WorkflowDagExecutor interface { HandleExternalCiWebhook(externalCiId int, request *bean2.CiArtifactWebhookRequest, auth func(token string, projectObject string, envObject string) bool, token string) (id int, err error) + ProcessDevtronAsyncHelmInstallRequest(CDAsyncInstallNatsMessage *bean7.AsyncCdDeployEvent, appIdentifier *client2.AppIdentifier, + ctx context.Context) error + UpdateWorkflowRunnerStatusForDeployment(appIdentifier *client2.AppIdentifier, wfr *pipelineConfig.CdWorkflowRunner, skipReleaseNotFound bool) bool - OnDeleteCdPipelineEvent(pipelineId int, triggeredBy int32) BuildCiArtifactRequestForWebhook(event pipeline.ExternalCiWebhookDto) (*bean2.CiArtifactWebhookRequest, error) } @@ -86,7 +86,6 @@ type WorkflowDagExecutorImpl struct { logger *zap.SugaredLogger pipelineRepository pipelineConfig.PipelineRepository cdWorkflowRepository pipelineConfig.CdWorkflowRepository - pubsubClient *pubsub.PubSubClientServiceImpl ciArtifactRepository repository.CiArtifactRepository enforcerUtil rbac.EnforcerUtil appWorkflowRepository appWorkflow.AppWorkflowRepository @@ -102,11 +101,6 @@ type WorkflowDagExecutorImpl struct { eventFactory client.EventFactory customTagService pipeline.CustomTagService - devtronAsyncHelmInstallRequestMap map[int]bool - devtronAsyncHelmInstallRequestLock *sync.Mutex - devtronAppReleaseContextMap map[int]DevtronAppReleaseContextType - devtronAppReleaseContextMapLock *sync.Mutex - helmAppService client2.HelmAppService cdWorkflowCommonService cd.CdWorkflowCommonService @@ -116,14 +110,8 @@ type WorkflowDagExecutorImpl struct { commonArtifactService artifacts.CommonArtifactService } -type DevtronAppReleaseContextType struct { - CancelContext context.CancelFunc - RunnerId int -} - func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pipelineConfig.PipelineRepository, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, - pubsubClient *pubsub.PubSubClientServiceImpl, ciArtifactRepository repository.CiArtifactRepository, enforcerUtil rbac.EnforcerUtil, appWorkflowRepository appWorkflow.AppWorkflowRepository, @@ -136,7 +124,6 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi eventFactory client.EventFactory, customTagService pipeline.CustomTagService, helmAppService client2.HelmAppService, - pipelineConfigListenerService pipeline.PipelineConfigListenerService, cdWorkflowCommonService cd.CdWorkflowCommonService, cdTriggerService devtronApps.TriggerService, manifestCreationService manifest.ManifestCreationService, @@ -144,7 +131,6 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi wde := &WorkflowDagExecutorImpl{logger: Logger, pipelineRepository: pipelineRepository, cdWorkflowRepository: cdWorkflowRepository, - pubsubClient: pubsubClient, ciArtifactRepository: 
ciArtifactRepository, enforcerUtil: enforcerUtil, appWorkflowRepository: appWorkflowRepository, @@ -156,16 +142,11 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi eventClient: eventClient, eventFactory: eventFactory, customTagService: customTagService, - - devtronAsyncHelmInstallRequestMap: make(map[int]bool), - devtronAsyncHelmInstallRequestLock: &sync.Mutex{}, - devtronAppReleaseContextMap: make(map[int]DevtronAppReleaseContextType), - devtronAppReleaseContextMapLock: &sync.Mutex{}, - helmAppService: helmAppService, - cdWorkflowCommonService: cdWorkflowCommonService, - cdTriggerService: cdTriggerService, - manifestCreationService: manifestCreationService, - commonArtifactService: commonArtifactService, + helmAppService: helmAppService, + cdWorkflowCommonService: cdWorkflowCommonService, + cdTriggerService: cdTriggerService, + manifestCreationService: manifestCreationService, + commonArtifactService: commonArtifactService, } config, err := types.GetCdConfig() if err != nil { @@ -182,11 +163,6 @@ func NewWorkflowDagExecutorImpl(Logger *zap.SugaredLogger, pipelineRepository pi return nil } wde.appServiceConfig = appServiceConfig - err = wde.SubscribeDevtronAsyncHelmInstallRequest() - if err != nil { - return nil - } - pipelineConfigListenerService.RegisterPipelineDeleteListener(wde) return wde } @@ -260,7 +236,7 @@ func (impl *WorkflowDagExecutorImpl) extractOverrideRequestFromCDAsyncInstallEve impl.logger.Errorw("error in fetching pipeline by pipelineId", "err", err) return nil, nil, err } - devtronApps.SetPipelineFieldsInOverrideRequest(CDAsyncInstallNatsMessage.ValuesOverrideRequest, pipeline) + triggerAdapter.SetPipelineFieldsInOverrideRequest(CDAsyncInstallNatsMessage.ValuesOverrideRequest, pipeline) if CDAsyncInstallNatsMessage.ValuesOverrideRequest.DeploymentType == models.DEPLOYMENTTYPE_UNKNOWN { CDAsyncInstallNatsMessage.ValuesOverrideRequest.DeploymentType = models.DEPLOYMENTTYPE_DEPLOY } @@ -376,136 +352,24 @@ func (impl *WorkflowDagExecutorImpl) handleIfPreviousRunnerTriggerRequest(curren return exists, nil } -func (impl *WorkflowDagExecutorImpl) UpdateReleaseContextForPipeline(pipelineId, cdWfrId int, cancel context.CancelFunc) { - impl.devtronAppReleaseContextMapLock.Lock() - defer impl.devtronAppReleaseContextMapLock.Unlock() - if releaseContext, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { - //Abort previous running release - impl.logger.Infow("new deployment has been triggered with a running deployment in progress!", "aborting deployment for pipelineId", pipelineId) - releaseContext.CancelContext() - } - impl.devtronAppReleaseContextMap[pipelineId] = DevtronAppReleaseContextType{ - CancelContext: cancel, - RunnerId: cdWfrId, - } -} - -func (impl *WorkflowDagExecutorImpl) RemoveReleaseContextForPipeline(pipelineId int, triggeredBy int32) { - impl.devtronAppReleaseContextMapLock.Lock() - defer impl.devtronAppReleaseContextMapLock.Unlock() - if releaseContext, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { - //Abort previous running release - impl.logger.Infow("CD pipeline has been deleted with a running deployment in progress!", "aborting deployment for pipelineId", pipelineId) - cdWfr, err := impl.cdWorkflowRepository.FindWorkflowRunnerById(releaseContext.RunnerId) - if err != nil { - impl.logger.Errorw("err on fetching cd workflow runner, RemoveReleaseContextForPipeline", "err", err) - } - if err = impl.cdWorkflowCommonService.MarkCurrentDeploymentFailed(cdWfr, errors.New("CD pipeline has been deleted"), 
triggeredBy); err != nil { - impl.logger.Errorw("error while updating current runner status to failed, RemoveReleaseContextForPipeline", "cdWfr", cdWfr.Id, "err", err) - } - releaseContext.CancelContext() - delete(impl.devtronAppReleaseContextMap, pipelineId) - } - return -} - -func (impl *WorkflowDagExecutorImpl) OnDeleteCdPipelineEvent(pipelineId int, triggeredBy int32) { - impl.logger.Debugw("CD pipeline delete event received", "pipelineId", pipelineId, "deletedBy", triggeredBy) - impl.RemoveReleaseContextForPipeline(pipelineId, triggeredBy) - return -} - -func (impl *WorkflowDagExecutorImpl) isReleaseContextExistsForPipeline(pipelineId, cdWfrId int) bool { - impl.devtronAppReleaseContextMapLock.Lock() - defer impl.devtronAppReleaseContextMapLock.Unlock() - if releaseContext, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { - return releaseContext.RunnerId == cdWfrId - } - return false -} - -func (impl *WorkflowDagExecutorImpl) handleConcurrentRequest(wfrId int) bool { - impl.devtronAsyncHelmInstallRequestLock.Lock() - defer impl.devtronAsyncHelmInstallRequestLock.Unlock() - if _, exists := impl.devtronAsyncHelmInstallRequestMap[wfrId]; exists { - //request is in process already, Skip here - return true - } - impl.devtronAsyncHelmInstallRequestMap[wfrId] = true - return false -} - -func (impl *WorkflowDagExecutorImpl) cleanUpDevtronAppReleaseContextMap(pipelineId, wfrId int) { - if impl.isReleaseContextExistsForPipeline(pipelineId, wfrId) { - impl.devtronAppReleaseContextMapLock.Lock() - defer impl.devtronAppReleaseContextMapLock.Unlock() - if _, ok := impl.devtronAppReleaseContextMap[pipelineId]; ok { - delete(impl.devtronAppReleaseContextMap, pipelineId) - } - } -} - -func (impl *WorkflowDagExecutorImpl) cleanUpDevtronAsyncHelmInstallRequest(pipelineId, wfrId int) { - impl.devtronAsyncHelmInstallRequestLock.Lock() - defer impl.devtronAsyncHelmInstallRequestLock.Unlock() - if _, exists := impl.devtronAsyncHelmInstallRequestMap[wfrId]; exists { - //request is in process already, Skip here - delete(impl.devtronAsyncHelmInstallRequestMap, wfrId) - } - impl.cleanUpDevtronAppReleaseContextMap(pipelineId, wfrId) -} - -func (impl *WorkflowDagExecutorImpl) processDevtronAsyncHelmInstallRequest(CDAsyncInstallNatsMessage *bean7.AsyncCdDeployEvent, appIdentifier *client2.AppIdentifier) { +func (impl *WorkflowDagExecutorImpl) ProcessDevtronAsyncHelmInstallRequest(CDAsyncInstallNatsMessage *bean7.AsyncCdDeployEvent, appIdentifier *client2.AppIdentifier, + ctx context.Context) error { overrideRequest := CDAsyncInstallNatsMessage.ValuesOverrideRequest cdWfr, err := impl.cdWorkflowRepository.FindWorkflowRunnerById(overrideRequest.WfrId) if err != nil { impl.logger.Errorw("err on fetching cd workflow runner, processDevtronAsyncHelmInstallRequest", "err", err) - return - } - - // skip if the cdWfr.Status is already in a terminal state - skipCDWfrStatusList := pipelineConfig.WfrTerminalStatusList - skipCDWfrStatusList = append(skipCDWfrStatusList, pipelineConfig.WorkflowInProgress) - if slices.Contains(skipCDWfrStatusList, cdWfr.Status) { - impl.logger.Warnw("skipped deployment as the workflow runner status is already in terminal state, processDevtronAsyncHelmInstallRequest", "cdWfrId", cdWfr.Id, "status", cdWfr.Status) - return - } - - //skip if the cdWfr is not the latest one - exists, err := impl.handleIfPreviousRunnerTriggerRequest(cdWfr, overrideRequest.UserId) - if err != nil { - impl.logger.Errorw("err in validating latest cd workflow runner, processDevtronAsyncHelmInstallRequest", "err", 
err) - return - } - if exists { - impl.logger.Warnw("skipped deployment as the workflow runner is not the latest one", "cdWfrId", cdWfr.Id) - err := impl.cdWorkflowCommonService.MarkCurrentDeploymentFailed(cdWfr, errors.New(pipelineConfig.NEW_DEPLOYMENT_INITIATED), overrideRequest.UserId) - if err != nil { - impl.logger.Errorw("error while updating current runner status to failed, processDevtronAsyncHelmInstallRequest", "cdWfr", cdWfr.Id, "err", err) - return - } - return - } - - if cdWfr.Status == pipelineConfig.WorkflowStarting && impl.isReleaseContextExistsForPipeline(overrideRequest.PipelineId, cdWfr.Id) { - impl.logger.Warnw("event redelivered! deployment is currently in progress, processDevtronAsyncHelmInstallRequest", "cdWfrId", cdWfr.Id, "status", cdWfr.Status) - return + return err } - - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(impl.appServiceConfig.DevtronChartInstallRequestTimeout)*time.Minute) - defer cancel() - - impl.UpdateReleaseContextForPipeline(overrideRequest.PipelineId, cdWfr.Id, cancel) //update workflow runner status, used in app workflow view err = impl.cdWorkflowCommonService.UpdateCDWorkflowRunnerStatus(ctx, overrideRequest, CDAsyncInstallNatsMessage.TriggeredAt, pipelineConfig.WorkflowStarting, "") if err != nil { impl.logger.Errorw("error in updating the workflow runner status, processDevtronAsyncHelmInstallRequest", "cdWfrId", cdWfr.Id, "err", err) - return + return err } // build merged values and save PCO history for the release valuesOverrideResponse, builtChartPath, err := impl.manifestCreationService.BuildManifestForTrigger(overrideRequest, CDAsyncInstallNatsMessage.TriggeredAt, ctx) if err != nil { - return + return err } _, span := otel.Tracer("orchestrator").Start(ctx, "appService.TriggerRelease") @@ -521,41 +385,8 @@ func (impl *WorkflowDagExecutorImpl) processDevtronAsyncHelmInstallRequest(CDAsy span.End() if err1 != nil { impl.logger.Errorw("error while update previous cd workflow runners, processDevtronAsyncHelmInstallRequest", "err", err, "runner", cdWfr, "pipelineId", overrideRequest.PipelineId) - return - } - } -} - -func (impl *WorkflowDagExecutorImpl) SubscribeDevtronAsyncHelmInstallRequest() error { - callback := func(msg *model.PubSubMsg) { - CDAsyncInstallNatsMessage, appIdentifier, err := impl.extractOverrideRequestFromCDAsyncInstallEvent(msg) - if err != nil { - impl.logger.Errorw("err on extracting override request, SubscribeDevtronAsyncHelmInstallRequest", "err", err) - return - } - if skip := impl.handleConcurrentRequest(CDAsyncInstallNatsMessage.ValuesOverrideRequest.WfrId); skip { - impl.logger.Warnw("concurrent request received, SubscribeDevtronAsyncHelmInstallRequest", "WfrId", CDAsyncInstallNatsMessage.ValuesOverrideRequest.WfrId) - return - } - defer impl.cleanUpDevtronAsyncHelmInstallRequest(CDAsyncInstallNatsMessage.ValuesOverrideRequest.PipelineId, CDAsyncInstallNatsMessage.ValuesOverrideRequest.WfrId) - impl.processDevtronAsyncHelmInstallRequest(CDAsyncInstallNatsMessage, appIdentifier) - return - } - - // add required logging here - var loggerFunc pubsub.LoggerFunc = func(msg model.PubSubMsg) (string, []interface{}) { - CDAsyncInstallNatsMessage := &bean7.AsyncCdDeployEvent{} - err := json.Unmarshal([]byte(msg.Data), CDAsyncInstallNatsMessage) - if err != nil { - return "error in unmarshalling CD async install request nats message", []interface{}{"err", err} + return err } - return "got message for devtron chart install", []interface{}{"appId", 
CDAsyncInstallNatsMessage.ValuesOverrideRequest.AppId, "pipelineId", CDAsyncInstallNatsMessage.ValuesOverrideRequest.PipelineId, "artifactId", CDAsyncInstallNatsMessage.ValuesOverrideRequest.CiArtifactId} - } - - err := impl.pubsubClient.Subscribe(pubsub.DEVTRON_CHART_INSTALL_TOPIC, callback, loggerFunc) - if err != nil { - impl.logger.Error(err) - return err } return nil } @@ -644,7 +475,7 @@ func (impl *WorkflowDagExecutorImpl) handleWebhookExternalCiEvent(artifact *repo // handle corrupt data (https://github.com/devtron-labs/devtron/issues/3826) func (impl *WorkflowDagExecutorImpl) deleteCorruptedPipelineStage(pipelineStage *repository4.PipelineStage, triggeredBy int32) (error, bool) { if pipelineStage != nil { - stageReq := &pipelineConfigBean.PipelineStageDto{ + stageReq := &bean3.PipelineStageDto{ Id: pipelineStage.Id, Type: pipelineStage.Type, } @@ -915,7 +746,7 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext bean5.T IsArtifactUploaded: request.IsArtifactUploaded, AuditLog: sql.AuditLog{CreatedBy: request.UserId, UpdatedBy: request.UserId, CreatedOn: createdOn, UpdatedOn: updatedOn}, } - plugin, err := impl.globalPluginRepository.GetPluginByName(pipelineConfigBean.VULNERABILITY_SCANNING_PLUGIN) + plugin, err := impl.globalPluginRepository.GetPluginByName(bean3.VULNERABILITY_SCANNING_PLUGIN) if err != nil || len(plugin) == 0 { impl.logger.Errorw("error in getting image scanning plugin", "err", err) return 0, err @@ -937,7 +768,7 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext bean5.T var pluginArtifacts []*repository.CiArtifact for registry, artifacts := range request.PluginRegistryArtifactDetails { for _, image := range artifacts { - if pipeline.PipelineType == string(pipelineConfigBean.CI_JOB) && image == "" { + if pipeline.PipelineType == string(bean3.CI_JOB) && image == "" { continue } pluginArtifact := &repository.CiArtifact{ diff --git a/pkg/workflow/status/WorkflowStatusService.go b/pkg/workflow/status/WorkflowStatusService.go index e121e83bd8..f843ec6f35 100644 --- a/pkg/workflow/status/WorkflowStatusService.go +++ b/pkg/workflow/status/WorkflowStatusService.go @@ -4,12 +4,10 @@ import ( "context" "fmt" application2 "github.com/argoproj/argo-cd/v2/pkg/apiclient/application" - pubub "github.com/devtron-labs/common-lib/pubsub-lib" bean2 "github.com/devtron-labs/devtron/api/bean" client "github.com/devtron-labs/devtron/api/helm-app/service" "github.com/devtron-labs/devtron/client/argocdServer" "github.com/devtron-labs/devtron/client/argocdServer/application" - client2 "github.com/devtron-labs/devtron/client/events" appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" @@ -19,10 +17,10 @@ import ( repository3 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" bean3 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" + "github.com/devtron-labs/devtron/pkg/eventProcessor/out" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/workflow/dag" - "github.com/devtron-labs/devtron/pkg/workflow/status/bean" util3 "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/argo" "github.com/go-pg/pg" @@ -58,6 +56,7 @@ type WorkflowStatusServiceImpl struct { 
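ProcessDevtronAsyncHelmInstallRequest is now exported, takes its context from the caller, and returns an error; the NATS subscription, the per-wfr dedup map, and the release-context bookkeeping removed above move out of WorkflowDagExecutor into the event-processor layer. A sketch of the resulting calling pattern follows; the helper name is illustrative, the import aliases follow the surrounding files, and the timeout, previously derived from DevtronChartInstallRequestTimeout inside the executor, is now whatever the caller supplies.

package example

import (
	"context"
	"time"

	client "github.com/devtron-labs/devtron/api/helm-app/service"
	eventBean "github.com/devtron-labs/devtron/pkg/eventProcessor/bean"
	"github.com/devtron-labs/devtron/pkg/workflow/dag"
)

// handleAsyncHelmInstall shows the split of responsibility after this change:
// the subscriber owns the timeout context, the executor owns the deployment logic.
func handleAsyncHelmInstall(executor dag.WorkflowDagExecutor, timeout time.Duration, msg *eventBean.AsyncCdDeployEvent, appIdentifier *client.AppIdentifier) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return executor.ProcessDevtronAsyncHelmInstallRequest(msg, appIdentifier, ctx)
}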
argoUserService argo.ArgoUserService pipelineStatusSyncDetailService status.PipelineStatusSyncDetailService argocdClientWrapperService argocdServer.ArgoClientWrapperService + cdPipelineEventPublishService out.CDPipelineEventPublishService cdWorkflowRepository pipelineConfig.CdWorkflowRepository pipelineOverrideRepository chartConfig.PipelineOverrideRepository @@ -69,7 +68,6 @@ type WorkflowStatusServiceImpl struct { pipelineRepository pipelineConfig.PipelineRepository application application.ServiceClient - eventClient client2.EventClient } func NewWorkflowStatusServiceImpl(logger *zap.SugaredLogger, @@ -80,6 +78,7 @@ func NewWorkflowStatusServiceImpl(logger *zap.SugaredLogger, argoUserService argo.ArgoUserService, pipelineStatusSyncDetailService status.PipelineStatusSyncDetailService, argocdClientWrapperService argocdServer.ArgoClientWrapperService, + cdPipelineEventPublishService out.CDPipelineEventPublishService, cdWorkflowRepository pipelineConfig.CdWorkflowRepository, pipelineOverrideRepository chartConfig.PipelineOverrideRepository, installedAppVersionHistoryRepository repository3.InstalledAppVersionHistoryRepository, @@ -88,8 +87,7 @@ func NewWorkflowStatusServiceImpl(logger *zap.SugaredLogger, installedAppRepository repository3.InstalledAppRepository, pipelineStatusTimelineRepository pipelineConfig.PipelineStatusTimelineRepository, pipelineRepository pipelineConfig.PipelineRepository, - application application.ServiceClient, - eventClient client2.EventClient) (*WorkflowStatusServiceImpl, error) { + application application.ServiceClient) (*WorkflowStatusServiceImpl, error) { impl := &WorkflowStatusServiceImpl{ logger: logger, workflowDagExecutor: workflowDagExecutor, @@ -101,6 +99,7 @@ func NewWorkflowStatusServiceImpl(logger *zap.SugaredLogger, argoUserService: argoUserService, pipelineStatusSyncDetailService: pipelineStatusSyncDetailService, argocdClientWrapperService: argocdClientWrapperService, + cdPipelineEventPublishService: cdPipelineEventPublishService, cdWorkflowRepository: cdWorkflowRepository, pipelineOverrideRepository: pipelineOverrideRepository, installedAppVersionHistoryRepository: installedAppVersionHistoryRepository, @@ -110,7 +109,6 @@ func NewWorkflowStatusServiceImpl(logger *zap.SugaredLogger, pipelineStatusTimelineRepository: pipelineStatusTimelineRepository, pipelineRepository: pipelineRepository, application: application, - eventClient: eventClient, } config, err := types.GetCdConfig() if err != nil { @@ -412,16 +410,9 @@ func (impl *WorkflowStatusServiceImpl) CheckAndSendArgoPipelineStatusSyncEventIf // pipelineId can be cdPipelineId or installedAppVersionId, using isAppStoreApplication flag to identify between them if lastSyncTime.IsZero() || (!lastSyncTime.IsZero() && time.Since(lastSyncTime) > 5*time.Second) { // create new nats event - statusUpdateEvent := bean.ArgoPipelineStatusSyncEvent{ - PipelineId: pipelineId, - InstalledAppVersionId: installedAppVersionId, - UserId: userId, - IsAppStoreApplication: isAppStoreApplication, - } - // write event - err = impl.eventClient.WriteNatsEvent(pubub.ARGO_PIPELINE_STATUS_UPDATE_TOPIC, statusUpdateEvent) + err = impl.cdPipelineEventPublishService.PublishArgoTypePipelineSyncEvent(pipelineId, installedAppVersionId, userId, isAppStoreApplication) if err != nil { - impl.logger.Errorw("error in writing nats event", "topic", pubub.ARGO_PIPELINE_STATUS_UPDATE_TOPIC, "payload", statusUpdateEvent) + impl.logger.Errorw("error, PublishArgoTypePipelineSyncEvent", "err", err) } } } diff --git 
a/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go b/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go index 9dcfc422ed..3cd88b8e31 100644 --- a/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go +++ b/vendor/github.com/devtron-labs/common-lib/pubsub-lib/JetStreamUtil.go @@ -95,6 +95,9 @@ const ( CD_STAGE_SUCCESS_EVENT_TOPIC string = "CD-STAGE-SUCCESS-EVENT" CD_STAGE_SUCCESS_EVENT_GROUP string = "CD-STAGE-SUCCESS-EVENT-GROUP" CD_STAGE_SUCCESS_EVENT_DURABLE string = "CD-STAGE-SUCCESS-EVENT-DURABLE" + CD_PIPELINE_DELETE_EVENT_TOPIC string = "CD-PIPELINE-DELETE-EVENT" + CD_PIPELINE_DELETE_EVENT_GROUP string = "CD-PIPELINE-DELETE-EVENT-GROUP" + CD_PIPELINE_DELETE_EVENT_DURABLE string = "CD-PIPELINE-DELETE-EVENT-DURABLE" ) type NatsTopic struct { @@ -138,6 +141,8 @@ var natsTopicMapping = map[string]NatsTopic{ DEVTRON_CHART_INSTALL_TOPIC: {topicName: DEVTRON_CHART_INSTALL_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: DEVTRON_CHART_INSTALL_GROUP, consumerName: DEVTRON_CHART_INSTALL_DURABLE}, PANIC_ON_PROCESSING_TOPIC: {topicName: PANIC_ON_PROCESSING_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: PANIC_ON_PROCESSING_GROUP, consumerName: PANIC_ON_PROCESSING_DURABLE}, CD_STAGE_SUCCESS_EVENT_TOPIC: {topicName: CD_STAGE_SUCCESS_EVENT_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: CD_STAGE_SUCCESS_EVENT_GROUP, consumerName: CD_STAGE_SUCCESS_EVENT_DURABLE}, + + CD_PIPELINE_DELETE_EVENT_TOPIC: {topicName: CD_PIPELINE_DELETE_EVENT_TOPIC, streamName: ORCHESTRATOR_STREAM, queueName: CD_PIPELINE_DELETE_EVENT_GROUP, consumerName: CD_PIPELINE_DELETE_EVENT_DURABLE}, } var NatsStreamWiseConfigMapping = map[string]NatsStreamConfig{ @@ -170,6 +175,8 @@ var NatsConsumerWiseConfigMapping = map[string]NatsConsumerConfig{ DEVTRON_CHART_INSTALL_DURABLE: {}, PANIC_ON_PROCESSING_DURABLE: {}, DEVTRON_TEST_CONSUMER: {}, + CD_STAGE_SUCCESS_EVENT_DURABLE: {}, + CD_PIPELINE_DELETE_EVENT_DURABLE: {}, } // getConsumerConfigMap will fetch the consumer wise config from the json string @@ -236,6 +243,20 @@ func ParseAndFillStreamWiseAndConsumerWiseConfigMaps() { defaultConsumerConfigVal := defaultConfig.GetDefaultNatsConsumerConfig() // initialise all the consumer wise config with default values or user defined values + updateNatsConsumerConfigMapping(defaultConsumerConfigVal, consumerConfigMap) + + // initialise all the stream wise config with default values or user defined values + updateNatsStreamConfigMapping(defaultStreamConfigVal, streamConfigMap) +} + +func updateNatsConsumerConfigMapping(defaultConsumerConfigVal NatsConsumerConfig, consumerConfigMap map[string]NatsConsumerConfig) { + //iterating through all nats topic mappings (assuming source of truth) to update any consumers if not present in consumer mapping + for _, natsTopic := range natsTopicMapping { + if _, ok := NatsConsumerWiseConfigMapping[natsTopic.consumerName]; !ok { + NatsConsumerWiseConfigMapping[natsTopic.consumerName] = NatsConsumerConfig{} + } + } + //initialise all the consumer wise config with default values or user defined values for key, _ := range NatsConsumerWiseConfigMapping { consumerConfig := defaultConsumerConfigVal if _, ok := consumerConfigMap[key]; ok { @@ -243,8 +264,9 @@ func ParseAndFillStreamWiseAndConsumerWiseConfigMaps() { } NatsConsumerWiseConfigMapping[key] = consumerConfig } +} - // initialise all the consumer wise config with default values or user defined values +func updateNatsStreamConfigMapping(defaultStreamConfigVal NatsStreamConfig, streamConfigMap 
map[string]NatsStreamConfig) { for key, _ := range NatsStreamWiseConfigMapping { streamConfig := defaultStreamConfigVal if _, ok := streamConfigMap[key]; ok { @@ -252,7 +274,6 @@ func ParseAndFillStreamWiseAndConsumerWiseConfigMaps() { } NatsStreamWiseConfigMapping[key] = streamConfig } - } func GetNatsTopic(topicName string) NatsTopic { diff --git a/vendor/modules.txt b/vendor/modules.txt index d5f7540932..9cff4d1f81 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -373,7 +373,7 @@ github.com/devtron-labs/authenticator/jwt github.com/devtron-labs/authenticator/middleware github.com/devtron-labs/authenticator/oidc github.com/devtron-labs/authenticator/password -# github.com/devtron-labs/common-lib v0.0.16-0.20240304102639-17132681584e +# github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538 ## explicit; go 1.20 github.com/devtron-labs/common-lib/blob-storage github.com/devtron-labs/common-lib/cloud-provider-identifier diff --git a/wire_gen.go b/wire_gen.go index 1d1e39f4ed..663248363e 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -57,7 +57,6 @@ import ( status4 "github.com/devtron-labs/devtron/api/router/app/pipeline/status" trigger2 "github.com/devtron-labs/devtron/api/router/app/pipeline/trigger" workflow2 "github.com/devtron-labs/devtron/api/router/app/workflow" - "github.com/devtron-labs/devtron/api/router/pubsub" server2 "github.com/devtron-labs/devtron/api/server" "github.com/devtron-labs/devtron/api/sse" team2 "github.com/devtron-labs/devtron/api/team" @@ -97,12 +96,12 @@ import ( "github.com/devtron-labs/devtron/pkg/appClone/batch" appStatus2 "github.com/devtron-labs/devtron/pkg/appStatus" "github.com/devtron-labs/devtron/pkg/appStore/chartGroup" - repository15 "github.com/devtron-labs/devtron/pkg/appStore/chartGroup/repository" + repository16 "github.com/devtron-labs/devtron/pkg/appStore/chartGroup/repository" "github.com/devtron-labs/devtron/pkg/appStore/chartProvider" "github.com/devtron-labs/devtron/pkg/appStore/discover/repository" service4 "github.com/devtron-labs/devtron/pkg/appStore/discover/service" repository3 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" - service2 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service" + service3 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/EAMode" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode/deployment" @@ -110,7 +109,7 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/FullMode/resource" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/common" "github.com/devtron-labs/devtron/pkg/appStore/values/repository" - service3 "github.com/devtron-labs/devtron/pkg/appStore/values/service" + service2 "github.com/devtron-labs/devtron/pkg/appStore/values/service" appWorkflow2 "github.com/devtron-labs/devtron/pkg/appWorkflow" "github.com/devtron-labs/devtron/pkg/argoApplication" "github.com/devtron-labs/devtron/pkg/attributes" @@ -161,7 +160,7 @@ import ( "github.com/devtron-labs/devtron/pkg/k8s/capacity" "github.com/devtron-labs/devtron/pkg/k8s/informer" "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" - repository16 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" + repository15 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" 
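The JetStreamUtil change above means a newly added topic only needs its TOPIC/GROUP/DURABLE constants plus a natsTopicMapping entry: updateNatsConsumerConfigMapping backfills a default NatsConsumerConfig for any durable consumer named in natsTopicMapping but missing from the explicit map. A tiny illustrative check of that behaviour, assuming the exported helpers keep the signatures shown above; the function below is not part of the library.

package example

import (
	pubsub "github.com/devtron-labs/common-lib/pubsub-lib"
)

// hasConsumerConfig reports whether a durable consumer ends up with an entry in
// NatsConsumerWiseConfigMapping once the config maps are (re)built. For example,
// hasConsumerConfig(pubsub.CD_PIPELINE_DELETE_EVENT_DURABLE) should be true, and
// with the backfill the same holds for any consumer referenced by natsTopicMapping
// even if it was never listed explicitly.
func hasConsumerConfig(consumerName string) bool {
	pubsub.ParseAndFillStreamWiseAndConsumerWiseConfigMaps()
	_, ok := pubsub.NatsConsumerWiseConfigMapping[consumerName]
	return ok
}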
"github.com/devtron-labs/devtron/pkg/module" "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/module/store" @@ -548,12 +547,12 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - pipelineConfigListenerServiceImpl := pipeline.NewPipelineConfigListenerServiceImpl(sugaredLogger) devtronAppCMCSServiceImpl := pipeline.NewDevtronAppCMCSServiceImpl(sugaredLogger, appServiceImpl, attributesRepositoryImpl) repositoryServiceClientImpl := repository14.NewServiceClientImpl(sugaredLogger, argoCDConnectionManagerImpl) argoClientWrapperServiceImpl := argocdServer.NewArgoClientWrapperServiceImpl(sugaredLogger, applicationServiceClientImpl, acdConfig, repositoryServiceClientImpl, gitOpsConfigReadServiceImpl, gitOperationServiceImpl) imageDigestPolicyServiceImpl := imageDigestPolicy.NewImageDigestPolicyServiceImpl(sugaredLogger, qualifierMappingServiceImpl, devtronResourceSearchableKeyServiceImpl) - cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deploymentServiceTypeConfig, applicationServiceClientImpl, customTagServiceImpl, pipelineConfigListenerServiceImpl, devtronAppCMCSServiceImpl, ciPipelineConfigServiceImpl, buildPipelineSwitchServiceImpl, argoClientWrapperServiceImpl, deployedAppMetricsServiceImpl, gitOpsConfigReadServiceImpl, gitOperationServiceImpl, chartServiceImpl, imageDigestPolicyServiceImpl) + pipelineConfigEventPublishServiceImpl := out.NewPipelineConfigEventPublishServiceImpl(sugaredLogger, pubSubClientServiceImpl) + cdPipelineConfigServiceImpl := pipeline.NewCdPipelineConfigServiceImpl(sugaredLogger, pipelineRepositoryImpl, environmentRepositoryImpl, pipelineConfigRepositoryImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, appRepositoryImpl, appServiceImpl, deploymentGroupRepositoryImpl, ciCdPipelineOrchestratorImpl, appStatusRepositoryImpl, ciPipelineRepositoryImpl, prePostCdScriptHistoryServiceImpl, clusterRepositoryImpl, helmAppServiceImpl, enforcerUtilImpl, pipelineStrategyHistoryServiceImpl, chartRepositoryImpl, resourceGroupServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deploymentServiceTypeConfig, applicationServiceClientImpl, customTagServiceImpl, devtronAppCMCSServiceImpl, ciPipelineConfigServiceImpl, buildPipelineSwitchServiceImpl, argoClientWrapperServiceImpl, deployedAppMetricsServiceImpl, gitOpsConfigReadServiceImpl, gitOperationServiceImpl, chartServiceImpl, imageDigestPolicyServiceImpl, pipelineConfigEventPublishServiceImpl) appArtifactManagerImpl := pipeline.NewAppArtifactManagerImpl(sugaredLogger, cdWorkflowRepositoryImpl, userServiceImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, ciWorkflowRepositoryImpl, pipelineStageServiceImpl, cdPipelineConfigServiceImpl, dockerArtifactStoreRepositoryImpl, ciPipelineRepositoryImpl, ciTemplateServiceImpl) globalStrategyMetadataChartRefMappingRepositoryImpl := 
chartRepoRepository.NewGlobalStrategyMetadataChartRefMappingRepositoryImpl(db, sugaredLogger) devtronAppStrategyServiceImpl := pipeline.NewDevtronAppStrategyServiceImpl(sugaredLogger, chartRepositoryImpl, globalStrategyMetadataChartRefMappingRepositoryImpl, ciCdPipelineOrchestratorImpl, cdPipelineConfigServiceImpl) @@ -572,10 +571,7 @@ func InitializeApp() (*App, error) { devtronAppConfigServiceImpl := pipeline.NewDevtronAppConfigServiceImpl(sugaredLogger, ciCdPipelineOrchestratorImpl, appRepositoryImpl, pipelineRepositoryImpl, resourceGroupServiceImpl, enforcerUtilImpl, ciMaterialConfigServiceImpl) pipelineBuilderImpl := pipeline.NewPipelineBuilderImpl(sugaredLogger, materialRepositoryImpl, chartRepositoryImpl, ciPipelineConfigServiceImpl, ciMaterialConfigServiceImpl, appArtifactManagerImpl, devtronAppCMCSServiceImpl, devtronAppStrategyServiceImpl, appDeploymentTypeChangeManagerImpl, cdPipelineConfigServiceImpl, devtronAppConfigServiceImpl) deploymentTemplateValidationServiceImpl := deploymentTemplate.NewDeploymentTemplateValidationServiceImpl(sugaredLogger, chartRefServiceImpl, scopedVariableManagerImpl) - installedAppDBExtendedServiceImpl, err := FullMode.NewInstalledAppDBExtendedServiceImpl(sugaredLogger, installedAppRepositoryImpl, appRepositoryImpl, userServiceImpl, installedAppVersionHistoryRepositoryImpl, appStatusServiceImpl, pubSubClientServiceImpl, gitOpsConfigReadServiceImpl) - if err != nil { - return nil, err - } + installedAppDBExtendedServiceImpl := FullMode.NewInstalledAppDBExtendedServiceImpl(sugaredLogger, installedAppRepositoryImpl, appRepositoryImpl, userServiceImpl, installedAppVersionHistoryRepositoryImpl, appStatusServiceImpl, gitOpsConfigReadServiceImpl) gitOpsValidationServiceImpl := validation.NewGitOpsValidationServiceImpl(sugaredLogger, gitFactory, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, chartTemplateServiceImpl, chartServiceImpl, installedAppDBExtendedServiceImpl) devtronAppGitOpConfigServiceImpl := gitOpsConfig.NewDevtronAppGitOpConfigServiceImpl(sugaredLogger, chartRepositoryImpl, chartServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, argoClientWrapperServiceImpl) cdHandlerImpl := pipeline.NewCdHandlerImpl(sugaredLogger, userServiceImpl, cdWorkflowRepositoryImpl, ciLogServiceImpl, ciArtifactRepositoryImpl, ciPipelineMaterialRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, ciWorkflowRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, imageTaggingServiceImpl, k8sServiceImpl, workflowServiceImpl, clusterServiceImplExtended, blobStorageConfigServiceImpl, customTagServiceImpl) @@ -585,7 +581,7 @@ func InitializeApp() (*App, error) { appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl, userRepositoryImpl, deployedAppMetricsServiceImpl) appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, ciPipelineConfigServiceImpl, gitOpsConfigReadServiceImpl) 
deploymentTemplateRepositoryImpl := repository2.NewDeploymentTemplateRepositoryImpl(db, sugaredLogger) - generateManifestDeploymentTemplateServiceImpl := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, appListingServiceImpl, appListingRepositoryImpl, deploymentTemplateRepositoryImpl, helmAppServiceImpl, chartRepositoryImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, chartRefServiceImpl) + generateManifestDeploymentTemplateServiceImpl := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, appListingServiceImpl, deploymentTemplateRepositoryImpl, helmAppServiceImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, chartRefServiceImpl) cvePolicyRepositoryImpl := security.NewPolicyRepositoryImpl(db) imageScanResultRepositoryImpl := security.NewImageScanResultRepositoryImpl(db, sugaredLogger) imageScanDeployInfoRepositoryImpl := security.NewImageScanDeployInfoRepositoryImpl(db, sugaredLogger) @@ -602,7 +598,7 @@ func InitializeApp() (*App, error) { return nil, err } commonArtifactServiceImpl := artifacts.NewCommonArtifactServiceImpl(sugaredLogger, ciArtifactRepositoryImpl) - workflowDagExecutorImpl := dag.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, pubSubClientServiceImpl, ciArtifactRepositoryImpl, enforcerUtilImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, ciWorkflowRepositoryImpl, ciPipelineRepositoryImpl, pipelineStageRepositoryImpl, globalPluginRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, customTagServiceImpl, helmAppServiceImpl, pipelineConfigListenerServiceImpl, cdWorkflowCommonServiceImpl, triggerServiceImpl, manifestCreationServiceImpl, commonArtifactServiceImpl) + workflowDagExecutorImpl := dag.NewWorkflowDagExecutorImpl(sugaredLogger, pipelineRepositoryImpl, cdWorkflowRepositoryImpl, ciArtifactRepositoryImpl, enforcerUtilImpl, appWorkflowRepositoryImpl, pipelineStageServiceImpl, ciWorkflowRepositoryImpl, ciPipelineRepositoryImpl, pipelineStageRepositoryImpl, globalPluginRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, customTagServiceImpl, helmAppServiceImpl, cdWorkflowCommonServiceImpl, triggerServiceImpl, manifestCreationServiceImpl, commonArtifactServiceImpl) externalCiRestHandlerImpl := restHandler.NewExternalCiRestHandlerImpl(sugaredLogger, validate, userServiceImpl, enforcerImpl, workflowDagExecutorImpl) pubSubClientRestHandlerImpl := restHandler.NewPubSubClientRestHandlerImpl(pubSubClientServiceImpl, sugaredLogger, ciCdConfig) webhookRouterImpl := router.NewWebhookRouterImpl(gitWebhookRestHandlerImpl, pipelineConfigRestHandlerImpl, externalCiRestHandlerImpl, pubSubClientRestHandlerImpl) @@ -640,18 +636,6 @@ func InitializeApp() (*App, error) { notificationRouterImpl := router.NewNotificationRouterImpl(notificationRestHandlerImpl) teamRestHandlerImpl := team2.NewTeamRestHandlerImpl(sugaredLogger, teamServiceImpl, userServiceImpl, enforcerImpl, validate, userAuthServiceImpl, deleteServiceExtendedImpl) teamRouterImpl := team2.NewTeamRouterImpl(teamRestHandlerImpl) - gitWebhookHandlerImpl := pubsub.NewGitWebhookHandler(sugaredLogger, pubSubClientServiceImpl, gitWebhookServiceImpl) - chartGroupDeploymentRepositoryImpl := 
repository15.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) - clusterInstalledAppsRepositoryImpl := repository3.NewClusterInstalledAppsRepositoryImpl(db, sugaredLogger) - eaModeDeploymentServiceImpl := EAMode.NewEAModeDeploymentServiceImpl(sugaredLogger, helmAppServiceImpl, appStoreApplicationVersionRepositoryImpl, helmAppClientImpl, installedAppRepositoryImpl, ociRegistryConfigRepositoryImpl) - appStoreDeploymentCommonServiceImpl := appStoreDeploymentCommon.NewAppStoreDeploymentCommonServiceImpl(sugaredLogger, appStoreApplicationVersionRepositoryImpl, chartTemplateServiceImpl) - fullModeDeploymentServiceImpl := deployment.NewFullModeDeploymentServiceImpl(sugaredLogger, applicationServiceClientImpl, argoK8sClientImpl, acdAuthConfig, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, argoUserServiceImpl, appStoreDeploymentCommonServiceImpl, helmAppServiceImpl, appStatusServiceImpl, pipelineStatusTimelineServiceImpl, userServiceImpl, pipelineStatusTimelineRepositoryImpl, appStoreApplicationVersionRepositoryImpl, argoClientWrapperServiceImpl, acdConfig, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, environmentRepositoryImpl) - serviceDeploymentServiceTypeConfig, err := service2.GetDeploymentServiceTypeConfig() - if err != nil { - return nil, err - } - appStoreDeploymentServiceImpl := service2.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, eaModeDeploymentServiceImpl, fullModeDeploymentServiceImpl, environmentServiceImpl, clusterServiceImplExtended, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, installedAppVersionHistoryRepositoryImpl, serviceDeploymentServiceTypeConfig, acdConfig, gitOpsConfigReadServiceImpl) - applicationStatusHandlerImpl := pubsub.NewApplicationStatusHandlerImpl(sugaredLogger, pubSubClientServiceImpl, appServiceImpl, workflowDagExecutorImpl, installedAppDBExtendedServiceImpl, appStoreDeploymentServiceImpl, pipelineBuilderImpl, pipelineRepositoryImpl, installedAppRepositoryImpl, cdWorkflowCommonServiceImpl) roleGroupServiceImpl := user.NewRoleGroupServiceImpl(userAuthRepositoryImpl, sugaredLogger, userRepositoryImpl, roleGroupRepositoryImpl, userCommonServiceImpl) userRestHandlerImpl := user2.NewUserRestHandlerImpl(userServiceImpl, validate, sugaredLogger, enforcerImpl, roleGroupServiceImpl, userCommonServiceImpl) userRouterImpl := user2.NewUserRouterImpl(userRestHandlerImpl) @@ -659,7 +643,7 @@ func InitializeApp() (*App, error) { chartRefRouterImpl := router.NewChartRefRouterImpl(chartRefRestHandlerImpl) configMapRestHandlerImpl := restHandler.NewConfigMapRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, chartServiceImpl, userServiceImpl, teamServiceImpl, enforcerImpl, pipelineRepositoryImpl, enforcerUtilImpl, configMapServiceImpl) configMapRouterImpl := router.NewConfigMapRouterImpl(configMapRestHandlerImpl) - k8sResourceHistoryRepositoryImpl := repository16.NewK8sResourceHistoryRepositoryImpl(db, sugaredLogger) + k8sResourceHistoryRepositoryImpl := repository15.NewK8sResourceHistoryRepositoryImpl(db, sugaredLogger) k8sResourceHistoryServiceImpl := kubernetesResourceAuditLogs.Newk8sResourceHistoryServiceImpl(k8sResourceHistoryRepositoryImpl, sugaredLogger, appRepositoryImpl, environmentRepositoryImpl) ephemeralContainersRepositoryImpl := 
repository.NewEphemeralContainersRepositoryImpl(db) ephemeralContainerServiceImpl := cluster2.NewEphemeralContainerServiceImpl(ephemeralContainersRepositoryImpl, sugaredLogger) @@ -669,20 +653,32 @@ func InitializeApp() (*App, error) { return nil, err } installedAppResourceServiceImpl := resource.NewInstalledAppResourceServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, applicationServiceClientImpl, acdAuthConfig, installedAppVersionHistoryRepositoryImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, appStatusServiceImpl, k8sServiceImpl, k8sCommonServiceImpl, k8sApplicationServiceImpl) - chartGroupEntriesRepositoryImpl := repository15.NewChartGroupEntriesRepositoryImpl(db, sugaredLogger) - chartGroupReposotoryImpl := repository15.NewChartGroupReposotoryImpl(db, sugaredLogger) + chartGroupEntriesRepositoryImpl := repository16.NewChartGroupEntriesRepositoryImpl(db, sugaredLogger) + chartGroupReposotoryImpl := repository16.NewChartGroupReposotoryImpl(db, sugaredLogger) + chartGroupDeploymentRepositoryImpl := repository16.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) appStoreVersionValuesRepositoryImpl := appStoreValuesRepository.NewAppStoreVersionValuesRepositoryImpl(sugaredLogger, db) - appStoreValuesServiceImpl := service3.NewAppStoreValuesServiceImpl(sugaredLogger, appStoreApplicationVersionRepositoryImpl, installedAppRepositoryImpl, appStoreVersionValuesRepositoryImpl, userServiceImpl) - chartGroupServiceImpl, err := chartGroup.NewChartGroupServiceImpl(sugaredLogger, chartGroupEntriesRepositoryImpl, chartGroupReposotoryImpl, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, appStoreVersionValuesRepositoryImpl, userAuthServiceImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, teamRepositoryImpl, appStoreValuesServiceImpl, pubSubClientServiceImpl, environmentServiceImpl, appStoreDeploymentServiceImpl, argoUserServiceImpl, pipelineStatusTimelineServiceImpl, acdConfig, fullModeDeploymentServiceImpl, gitOperationServiceImpl) + appStoreValuesServiceImpl := service2.NewAppStoreValuesServiceImpl(sugaredLogger, appStoreApplicationVersionRepositoryImpl, installedAppRepositoryImpl, appStoreVersionValuesRepositoryImpl, userServiceImpl) + clusterInstalledAppsRepositoryImpl := repository3.NewClusterInstalledAppsRepositoryImpl(db, sugaredLogger) + eaModeDeploymentServiceImpl := EAMode.NewEAModeDeploymentServiceImpl(sugaredLogger, helmAppServiceImpl, appStoreApplicationVersionRepositoryImpl, helmAppClientImpl, installedAppRepositoryImpl, ociRegistryConfigRepositoryImpl) + appStoreDeploymentCommonServiceImpl := appStoreDeploymentCommon.NewAppStoreDeploymentCommonServiceImpl(sugaredLogger, appStoreApplicationVersionRepositoryImpl, chartTemplateServiceImpl) + fullModeDeploymentServiceImpl := deployment.NewFullModeDeploymentServiceImpl(sugaredLogger, applicationServiceClientImpl, argoK8sClientImpl, acdAuthConfig, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, argoUserServiceImpl, appStoreDeploymentCommonServiceImpl, helmAppServiceImpl, appStatusServiceImpl, pipelineStatusTimelineServiceImpl, userServiceImpl, pipelineStatusTimelineRepositoryImpl, appStoreApplicationVersionRepositoryImpl, argoClientWrapperServiceImpl, acdConfig, gitOperationServiceImpl, gitOpsConfigReadServiceImpl, gitOpsValidationServiceImpl, environmentRepositoryImpl) + serviceDeploymentServiceTypeConfig, err := service3.GetDeploymentServiceTypeConfig() + if err != nil { + 
return nil, err + } + appStoreDeploymentServiceImpl := service3.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, clusterInstalledAppsRepositoryImpl, appRepositoryImpl, eaModeDeploymentServiceImpl, fullModeDeploymentServiceImpl, environmentServiceImpl, clusterServiceImplExtended, helmAppServiceImpl, appStoreDeploymentCommonServiceImpl, installedAppVersionHistoryRepositoryImpl, serviceDeploymentServiceTypeConfig, acdConfig, gitOpsConfigReadServiceImpl) + appStoreAppsEventPublishServiceImpl := out.NewAppStoreAppsEventPublishServiceImpl(sugaredLogger, pubSubClientServiceImpl) + chartGroupServiceImpl, err := chartGroup.NewChartGroupServiceImpl(sugaredLogger, chartGroupEntriesRepositoryImpl, chartGroupReposotoryImpl, chartGroupDeploymentRepositoryImpl, installedAppRepositoryImpl, appStoreVersionValuesRepositoryImpl, userAuthServiceImpl, appStoreApplicationVersionRepositoryImpl, environmentRepositoryImpl, teamRepositoryImpl, appStoreValuesServiceImpl, environmentServiceImpl, appStoreDeploymentServiceImpl, argoUserServiceImpl, pipelineStatusTimelineServiceImpl, acdConfig, fullModeDeploymentServiceImpl, gitOperationServiceImpl, appStoreAppsEventPublishServiceImpl) if err != nil { return nil, err } installedAppDBServiceImpl := EAMode.NewInstalledAppDBServiceImpl(sugaredLogger, installedAppRepositoryImpl, appRepositoryImpl, userServiceImpl, installedAppVersionHistoryRepositoryImpl) - workflowStatusServiceImpl, err := status2.NewWorkflowStatusServiceImpl(sugaredLogger, workflowDagExecutorImpl, pipelineStatusTimelineServiceImpl, appServiceImpl, appStatusServiceImpl, acdConfig, appServiceConfig, argoUserServiceImpl, pipelineStatusSyncDetailServiceImpl, argoClientWrapperServiceImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, installedAppRepositoryImpl, pipelineStatusTimelineRepositoryImpl, pipelineRepositoryImpl, applicationServiceClientImpl, eventRESTClientImpl) + cdPipelineEventPublishServiceImpl := out.NewCDPipelineEventPublishServiceImpl(sugaredLogger, pubSubClientServiceImpl) + workflowStatusServiceImpl, err := status2.NewWorkflowStatusServiceImpl(sugaredLogger, workflowDagExecutorImpl, pipelineStatusTimelineServiceImpl, appServiceImpl, appStatusServiceImpl, acdConfig, appServiceConfig, argoUserServiceImpl, pipelineStatusSyncDetailServiceImpl, argoClientWrapperServiceImpl, cdPipelineEventPublishServiceImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, installedAppRepositoryImpl, pipelineStatusTimelineRepositoryImpl, pipelineRepositoryImpl, applicationServiceClientImpl) if err != nil { return nil, err } - cdApplicationStatusUpdateHandlerImpl := cron2.NewCdApplicationStatusUpdateHandlerImpl(sugaredLogger, appServiceImpl, workflowDagExecutorImpl, installedAppDBServiceImpl, cdHandlerImpl, appServiceConfig, pubSubClientServiceImpl, pipelineStatusTimelineRepositoryImpl, eventRESTClientImpl, appListingRepositoryImpl, cdWorkflowRepositoryImpl, pipelineRepositoryImpl, installedAppVersionHistoryRepositoryImpl, installedAppRepositoryImpl, cronLoggerImpl, cdWorkflowCommonServiceImpl, workflowStatusServiceImpl) + cdApplicationStatusUpdateHandlerImpl := cron2.NewCdApplicationStatusUpdateHandlerImpl(sugaredLogger, appServiceImpl, workflowDagExecutorImpl, installedAppDBServiceImpl, 
appServiceConfig, pipelineStatusTimelineRepositoryImpl, eventRESTClientImpl, appListingRepositoryImpl, cdWorkflowRepositoryImpl, pipelineRepositoryImpl, installedAppVersionHistoryRepositoryImpl, installedAppRepositoryImpl, cronLoggerImpl, cdWorkflowCommonServiceImpl, workflowStatusServiceImpl) installedAppDeploymentTypeChangeServiceImpl := deploymentTypeChange.NewInstalledAppDeploymentTypeChangeServiceImpl(sugaredLogger, installedAppRepositoryImpl, installedAppVersionHistoryRepositoryImpl, appStatusRepositoryImpl, gitOpsConfigReadServiceImpl, environmentRepositoryImpl, applicationServiceClientImpl, k8sCommonServiceImpl, k8sServiceImpl, fullModeDeploymentServiceImpl, eaModeDeploymentServiceImpl, argoClientWrapperServiceImpl, chartGroupServiceImpl, helmAppServiceImpl, argoUserServiceImpl, clusterServiceImplExtended, appRepositoryImpl) installedAppRestHandlerImpl := appStore.NewInstalledAppRestHandlerImpl(sugaredLogger, userServiceImpl, enforcerImpl, enforcerUtilImpl, enforcerUtilHelmImpl, installedAppDBExtendedServiceImpl, installedAppResourceServiceImpl, chartGroupServiceImpl, validate, clusterServiceImplExtended, applicationServiceClientImpl, appStoreDeploymentServiceImpl, helmAppClientImpl, argoUserServiceImpl, cdApplicationStatusUpdateHandlerImpl, installedAppRepositoryImpl, appCrudOperationServiceImpl, installedAppDeploymentTypeChangeServiceImpl) appStoreValuesRestHandlerImpl := appStoreValues.NewAppStoreValuesRestHandlerImpl(sugaredLogger, userServiceImpl, appStoreValuesServiceImpl) @@ -766,16 +762,14 @@ func InitializeApp() (*App, error) { telemetryRouterImpl := router.NewTelemetryRouterImpl(sugaredLogger, telemetryRestHandlerImpl) bulkUpdateRepositoryImpl := bulkUpdate.NewBulkUpdateRepository(db, sugaredLogger) deployedAppServiceImpl := deployedApp.NewDeployedAppServiceImpl(sugaredLogger, k8sCommonServiceImpl, triggerServiceImpl, environmentRepositoryImpl, pipelineRepositoryImpl, cdWorkflowRepositoryImpl) - bulkUpdateServiceImpl, err := bulkAction.NewBulkUpdateServiceImpl(bulkUpdateRepositoryImpl, sugaredLogger, environmentRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, workflowDagExecutorImpl, pipelineBuilderImpl, enforcerUtilImpl, ciHandlerImpl, ciPipelineRepositoryImpl, appWorkflowRepositoryImpl, appWorkflowServiceImpl, pubSubClientServiceImpl, argoUserServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, triggerServiceImpl, deployedAppServiceImpl, cdWorkflowCommonServiceImpl) - if err != nil { - return nil, err - } + bulkUpdateServiceImpl := bulkAction.NewBulkUpdateServiceImpl(bulkUpdateRepositoryImpl, sugaredLogger, environmentRepositoryImpl, pipelineRepositoryImpl, appRepositoryImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, pipelineBuilderImpl, enforcerUtilImpl, ciHandlerImpl, ciPipelineRepositoryImpl, appWorkflowRepositoryImpl, appWorkflowServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, deployedAppServiceImpl, cdPipelineEventPublishServiceImpl) bulkUpdateRestHandlerImpl := restHandler.NewBulkUpdateRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, bulkUpdateServiceImpl, chartServiceImpl, propertiesConfigServiceImpl, applicationServiceClientImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, environmentServiceImpl, gitRegistryConfigImpl, dockerRegistryConfigImpl, cdHandlerImpl, 
appCloneServiceImpl, appWorkflowServiceImpl, materialRepositoryImpl, policyServiceImpl, imageScanResultRepositoryImpl, argoUserServiceImpl) bulkUpdateRouterImpl := router.NewBulkUpdateRouterImpl(bulkUpdateRestHandlerImpl) webhookSecretValidatorImpl := git2.NewWebhookSecretValidatorImpl(sugaredLogger) webhookEventDataRepositoryImpl := repository2.NewWebhookEventDataRepositoryImpl(db) webhookEventDataConfigImpl := pipeline.NewWebhookEventDataConfigImpl(sugaredLogger, webhookEventDataRepositoryImpl) - webhookEventHandlerImpl := restHandler.NewWebhookEventHandlerImpl(sugaredLogger, gitHostConfigImpl, eventRESTClientImpl, webhookSecretValidatorImpl, webhookEventDataConfigImpl) + ciPipelineEventPublishServiceImpl := out.NewCIPipelineEventPublishServiceImpl(sugaredLogger, pubSubClientServiceImpl) + webhookEventHandlerImpl := restHandler.NewWebhookEventHandlerImpl(sugaredLogger, gitHostConfigImpl, eventRESTClientImpl, webhookSecretValidatorImpl, webhookEventDataConfigImpl, ciPipelineEventPublishServiceImpl) webhookListenerRouterImpl := router.NewWebhookListenerRouterImpl(webhookEventHandlerImpl) appFilteringRestHandlerImpl := appList.NewAppFilteringRestHandlerImpl(sugaredLogger, teamServiceImpl, enforcerImpl, userServiceImpl, clusterServiceImplExtended, environmentServiceImpl) appFilteringRouterImpl := appList2.NewAppFilteringRouterImpl(appFilteringRestHandlerImpl) @@ -784,7 +778,7 @@ func InitializeApp() (*App, error) { appInfoRestHandlerImpl := appInfo.NewAppInfoRestHandlerImpl(sugaredLogger, appCrudOperationServiceImpl, userServiceImpl, validate, enforcerUtilImpl, enforcerImpl, helmAppServiceImpl, enforcerUtilHelmImpl, genericNoteServiceImpl) appInfoRouterImpl := appInfo2.NewAppInfoRouterImpl(sugaredLogger, appInfoRestHandlerImpl) deploymentConfigServiceImpl := pipeline.NewDeploymentConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, pipelineRepositoryImpl, pipelineConfigRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl, scopedVariableCMCSManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl) - pipelineTriggerRestHandlerImpl := trigger.NewPipelineRestHandler(appServiceImpl, userServiceImpl, validate, enforcerImpl, teamServiceImpl, sugaredLogger, enforcerUtilImpl, workflowDagExecutorImpl, deploymentGroupServiceImpl, argoUserServiceImpl, deploymentConfigServiceImpl, deployedAppServiceImpl, triggerServiceImpl, workflowEventPublishServiceImpl) + pipelineTriggerRestHandlerImpl := trigger.NewPipelineRestHandler(appServiceImpl, userServiceImpl, validate, enforcerImpl, teamServiceImpl, sugaredLogger, enforcerUtilImpl, deploymentGroupServiceImpl, argoUserServiceImpl, deploymentConfigServiceImpl, deployedAppServiceImpl, triggerServiceImpl, workflowEventPublishServiceImpl) sseSSE := sse.NewSSE() pipelineTriggerRouterImpl := trigger2.NewPipelineTriggerRouter(pipelineTriggerRestHandlerImpl, sseSSE) webhookDataRestHandlerImpl := webhook.NewWebhookDataRestHandlerImpl(sugaredLogger, userServiceImpl, ciPipelineMaterialRepositoryImpl, enforcerUtilImpl, enforcerImpl, clientImpl, webhookEventDataConfigImpl) @@ -884,19 +878,23 @@ func InitializeApp() (*App, error) { infraConfigRouterImpl := infraConfig2.NewInfraProfileRouterImpl(infraConfigRestHandlerImpl) argoApplicationRestHandlerImpl := argoApplication2.NewArgoApplicationRestHandlerImpl(argoApplicationServiceImpl, sugaredLogger, enforcerImpl) argoApplicationRouterImpl := argoApplication2.NewArgoApplicationRouterImpl(argoApplicationRestHandlerImpl) - muxRouter := router.NewMuxRouter(sugaredLogger, 
environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, gitWebhookHandlerImpl, applicationStatusHandlerImpl, pubSubClientServiceImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl) + muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl) loggingMiddlewareImpl := util4.NewLoggingMiddlewareImpl(userServiceImpl) cdWorkflowServiceImpl := cd.NewCdWorkflowServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) cdWorkflowRunnerServiceImpl := cd.NewCdWorkflowRunnerServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) webhookServiceImpl := pipeline.NewWebhookServiceImpl(ciArtifactRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowCommonServiceImpl) - workflowEventProcessorImpl, err := in.NewWorkflowEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, cdWorkflowServiceImpl, cdWorkflowRunnerServiceImpl, workflowDagExecutorImpl, argoUserServiceImpl, ciHandlerImpl, cdHandlerImpl, eventSimpleFactoryImpl, eventRESTClientImpl, triggerServiceImpl, deployedAppServiceImpl, webhookServiceImpl, validate, 
globalEnvVariables, cdWorkflowCommonServiceImpl, pipelineRepositoryImpl, ciArtifactRepositoryImpl, cdWorkflowRepositoryImpl) + workflowEventProcessorImpl, err := in.NewWorkflowEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, cdWorkflowServiceImpl, cdWorkflowRunnerServiceImpl, workflowDagExecutorImpl, argoUserServiceImpl, ciHandlerImpl, cdHandlerImpl, eventSimpleFactoryImpl, eventRESTClientImpl, triggerServiceImpl, deployedAppServiceImpl, webhookServiceImpl, validate, globalEnvVariables, cdWorkflowCommonServiceImpl, cdPipelineConfigServiceImpl, pipelineRepositoryImpl, ciArtifactRepositoryImpl, cdWorkflowRepositoryImpl) if err != nil { return nil, err } - centralEventProcessor, err := eventProcessor.NewCentralEventProcessor(workflowEventProcessorImpl, sugaredLogger) + ciPipelineEventProcessorImpl := in.NewCIPipelineEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, gitWebhookServiceImpl) + cdPipelineEventProcessorImpl := in.NewCDPipelineEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, cdWorkflowCommonServiceImpl, workflowStatusServiceImpl, triggerServiceImpl, argoUserServiceImpl, pipelineRepositoryImpl, installedAppRepositoryImpl) + deployedApplicationEventProcessorImpl := in.NewDeployedApplicationEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, gitOpsConfigReadServiceImpl, installedAppDBExtendedServiceImpl, workflowDagExecutorImpl, cdWorkflowCommonServiceImpl, pipelineBuilderImpl, appStoreDeploymentServiceImpl, pipelineRepositoryImpl, installedAppRepositoryImpl) + appStoreAppsEventProcessorImpl := in.NewAppStoreAppsEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, chartGroupServiceImpl, installedAppVersionHistoryRepositoryImpl) + centralEventProcessor, err := eventProcessor.NewCentralEventProcessor(sugaredLogger, workflowEventProcessorImpl, ciPipelineEventProcessorImpl, cdPipelineEventProcessorImpl, deployedApplicationEventProcessorImpl, appStoreAppsEventProcessorImpl) if err != nil { return nil, err } - mainApp := NewApp(muxRouter, sugaredLogger, sseSSE, syncedEnforcer, db, pubSubClientServiceImpl, sessionManager, posthogClient, loggingMiddlewareImpl, centralEventProcessor) + mainApp := NewApp(muxRouter, sugaredLogger, sseSSE, syncedEnforcer, db, sessionManager, posthogClient, loggingMiddlewareImpl, centralEventProcessor) return mainApp, nil } From 8e4cc36eefad66c82d644fac9f83bd3f7aadc568 Mon Sep 17 00:00:00 2001 From: Prakash Date: Wed, 20 Mar 2024 17:09:24 +0530 Subject: [PATCH 08/29] fix: hide ldap creds in in get req (#4788) * hide ldap creds in in get req * removed unnecessary code * refactor * revert * refactor --- pkg/auth/sso/SSOLoginService.go | 61 +++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/pkg/auth/sso/SSOLoginService.go b/pkg/auth/sso/SSOLoginService.go index 0c121be8c4..0820f5e8c5 100644 --- a/pkg/auth/sso/SSOLoginService.go +++ b/pkg/auth/sso/SSOLoginService.go @@ -55,8 +55,47 @@ type Config struct { Config map[string]interface{} `json:"config"` } -const ClientID = "clientID" -const ClientSecret = "clientSecret" +func (r *Config) IsSsoLdap() bool { + return r.Name == LDAP +} + +func (r *Config) secureCredentials() { + r.secureCredentialValue(ClientID) + r.secureCredentialValue(ClientSecret) + if r.IsSsoLdap() { + r.secureCredentialValue(LdapBindPW) + r.secureCredentialValue(LdapUsernamePrompt) + } +} + +func (r *Config) secureCredentialValue(credentialKey string) { + if r.Config[credentialKey] != nil { + r.Config[credentialKey] = "" + } +} + +func (r *Config) 
updateCredentialsFromBase(configFromDb *Config) { + r.updateSecretFromBase(configFromDb, ClientID) + r.updateSecretFromBase(configFromDb, ClientSecret) + if r.IsSsoLdap() { + r.updateSecretFromBase(configFromDb, LdapBindPW) + r.updateSecretFromBase(configFromDb, LdapUsernamePrompt) + } +} + +func (r *Config) updateSecretFromBase(baseConfigData *Config, key string) { + if r.Config[key] == "" && baseConfigData.Config[key] != nil { + r.Config[key] = baseConfigData.Config[key] + } +} + +const ( + ClientID = "clientID" + ClientSecret = "clientSecret" + LdapBindPW = "bindPW" + LdapUsernamePrompt = "usernamePrompt" + LDAP = "LDAP" +) func NewSSOLoginServiceImpl( logger *zap.SugaredLogger, @@ -184,8 +223,7 @@ func (impl SSOLoginServiceImpl) UpdateSSOLogin(request *bean.SSOLoginDto) (*bean impl.logger.Errorw("error while Unmarshalling model's config", "error", err) return nil, err } - updateSecretFromBase(&configData, &modelConfigData, ClientID) - updateSecretFromBase(&configData, &modelConfigData, ClientSecret) + configData.updateCredentialsFromBase(&modelConfigData) newConfigString, err := json.Marshal(configData) if err != nil { impl.logger.Errorw("error while Marshaling configData", "error", err) @@ -360,8 +398,7 @@ func (impl SSOLoginServiceImpl) GetByName(name string) (*bean.SSOLoginDto, error impl.logger.Errorw("error while Unmarshalling model's config", "error", err) return nil, err } - secureCredentialValue(&configData, ClientID) - secureCredentialValue(&configData, ClientSecret) + configData.secureCredentials() configString, err := json.Marshal(configData) if err != nil { impl.logger.Errorw("error while Marshaling configData", "error", err) @@ -383,15 +420,3 @@ func (impl SSOLoginServiceImpl) GetByName(name string) (*bean.SSOLoginDto, error } return ssoLoginDto, nil } - -func updateSecretFromBase(configData *Config, baseConfigData *Config, key string) { - if configData.Config[key] == "" && baseConfigData.Config[key] != nil { - configData.Config[key] = baseConfigData.Config[key] - } -} - -func secureCredentialValue(configData *Config, credentialKey string) { - if configData.Config[credentialKey] != nil { - configData.Config[credentialKey] = "" - } -} From 4da24a7268cd98ebe1ec2fd511e3cbb47c8a20c6 Mon Sep 17 00:00:00 2001 From: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Date: Thu, 21 Mar 2024 11:24:30 +0530 Subject: [PATCH 09/29] pointer binding (#4826) --- pkg/pipeline/types/CiCdConfig.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/pipeline/types/CiCdConfig.go b/pkg/pipeline/types/CiCdConfig.go index 934a82a682..9f8cde5d17 100644 --- a/pkg/pipeline/types/CiCdConfig.go +++ b/pkg/pipeline/types/CiCdConfig.go @@ -499,7 +499,7 @@ type Trigger struct { ReferenceCiWorkflowId int } -func (obj Trigger) BuildTriggerObject(refCiWorkflow *pipelineConfig.CiWorkflow, +func (obj *Trigger) BuildTriggerObject(refCiWorkflow *pipelineConfig.CiWorkflow, ciMaterials []*pipelineConfig.CiPipelineMaterial, triggeredBy int32, invalidateCache bool, extraEnvironmentVariables map[string]string, pipelineType string) { From b93c00f10cc75474706021dbde844d85eadfc797 Mon Sep 17 00:00:00 2001 From: Yashasvi17 <155513200+YashasviDevtron@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:04:25 +0530 Subject: [PATCH 10/29] feat: Branch Divergence Checker Plugin (#4806) * Branch Divergence Checker Plugin * Delete scripts/sql/229_GitHub_branch_divergence_checker_v1.0.down.sql * Delete assets/GitHubBranchDivergenceCheckerlogo.png * Adding assets * Changing asset name * Removing 
asset * Update 229_github_branch_divergence_checker.up.sql --- assets/branch-compare-plugin-logo.png | Bin 0 -> 5552 bytes ..._gitHub_branch_divergence_checker.down.sql | 8 +++ ...29_github_branch_divergence_checker.up.sql | 63 ++++++++++++++++++ 3 files changed, 71 insertions(+) create mode 100644 assets/branch-compare-plugin-logo.png create mode 100644 scripts/sql/229_gitHub_branch_divergence_checker.down.sql create mode 100644 scripts/sql/229_github_branch_divergence_checker.up.sql diff --git a/assets/branch-compare-plugin-logo.png b/assets/branch-compare-plugin-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..d7529f2d48db7095f6a7ac8830e1b7051fd5e0a8 GIT binary patch literal 5552 zcmZ`-c{r3^*gvz_hbT1mX`@7lY*~h6&rr#}heVdFSw~YUM2j_&w@9`uSzd&ZNFmvm zFhuq(`#QeoncnNWuJ4a;{+Q>S``pX#zR&V|&hyC7K#P@`ml*)Ss-vxO82|*-1S5>} z(Eps;zlP8Q?W}f54S=FpmR&nKsE>2dzI+LQKw$ug_W;<2M8qiod?f*xu?0Xe34p_% zsg*`b&;Wyv&ZUbC6U@8}N7NA5RT^{NDYzm~}_m!QsZ&w)Yv(E#q?GtLu6IXSvqx;Q56*xHF28>B8@ zwl-5Xk^6GDjQHg!w9WwjD;?J_O}{#(E<-aZc>~GAxks?vWo^yLWA5a{p%p8qi4=-< zEE|=i?0sj_fA~(#mQ&;7TvHb@YsHFox7Mc<2V^XwR=3Vf$7ZZ`*yz~A3w}4M9y=M@ zXdd!vOn&%uNPk09Lg0rrro-o1GNgweo@(H(7PQSiXCW

w9T4vqL1)LdI70$0Q+R z^zJzl$I_L&#VA3WhW4?Ja+DAJUu@&dt=jF&CHwyLM45Uag}oGZ&O%uzkjp}R3L{#%V0#KIH}r7DmX^wc{^{& zH`Stczf}{xs?1)L;B{yY4UR_*?epGHz5Cw6P5Pz?wOh`4-`=-JuFK^CO2`sY(9Esp z?&SV?E)C27bBgaqO0dc73enj_2#WhUjhEjr`ha3n9<6gw%8BEmO{n-xxUc+n1Z0CHHfn$z&HA+%Tz$x)~!;Q2^#tgh*DC^WEaelXYu zw@H*aQBn6{GGm6Q{FDI8i*FW8eENNAZ#kvk{~zg{h|@S&_^qXmdbLzs=$~{PtGv8|cwZ-q~I+aY>?cX+YgVcW_S$Uy>JG>-H zm!(2gfo?P6+zjWm$57P4dO$>im|UBg2g{3@S8|_i$C|uJu&5W6H-$YdAAKrdXA-5% zV#q7~Pi?>XwYn0x44)EFvi@#t7tiC(n`JbLwM3P)VeLDDqsL?)*k9VCywx1!oH`;1 z2|wdf&dc>%M!I`y6l~-y$C?BaA?ZnTf^HalXC*~=KY3xk<_N$eKr%(}W9P(G&I0z+ zR?{veZ4+S%;dM}s>2auwGp#+Zy6pQ-y!*QE>R#U_3DN;^a8*FfHWUK_!jO#+D3JL| zHm_ps4;C!p)k#PI@X4`swS3#-<$OUg05F4-8d}>a`Q8^^?moX-XK_rvkq^qmbgeRo zV0-x?slBlClKqs%q!E8y+U?1*#qT(eUeZP>A@OCBBnXs_8FKd*jkns|vOlhBE&GM{ z2wP)(hd07Mbx0(D@Fqm3-nc_+ZBeyrvMO^$x~Uo6wP6q+T087kt!xZr1`?Z6Um5ug zZT$Aj|ESWdW8>v>=90vkoP{sz4G&{B(;sBQ z6RPE{or;n}fb&C1aZ+~qV(#W;Vn4aQV=)3vjMY(PooZRz_xmDFcz4-0M_D>!w!87d z`+ShjKVmJscM`|We>sFI6UX-AHS;Y@hPT^?&!`2qvb0FLUt-gWrGJt+=IOxi>*zzA zS0*c!&-{~mvsC=y4*KdN72}Boe9unRt!0x|9{&i+ZRhK3C?LU}^sY~luvabpB#X}f z=OD`w`8Fd*L6+m<+YdvxvaU(8?xhB~DPX{^I$2pto)iCZ-_ZQvr|o@Z4_&>G=|;`9 zj!>3=bGyo%V>_LIAvI~$G>`5HolB;8&yAG3HDRkNGu=}Q9tD?7(0~}f#+<+NaH=@RsYd;97MZ(o z^~U+Uf!p~5s~?QY1i)r9aw2=tV%USvZxh%x`9^iRBN7`b=kq6k2?U672dM}#4Gg+&DhZ20oVG$ec|1ty zyPqo+Wd-fNO@S&$6`ODdFh3jD_E?_yRza@fX9y=CFK&u^NlXA>Ba%6@(-tF7Oo**p zLV?l8lzwg!v_GOtDZ)Cv0S4xt%g&u z8vc0Q?dtQ5u2{J9gb}2UZY19|(1%W0h-Ue5>(?l@;a@UQa2LrMqu}0ZGj5j+_lN-H zP#M(apFXnwcN7!NMTdnCH$Z%X7{REXC$DTh8-R~Tz}U28VZ6NekT!_N)W!?CYe5?r zieS6Rnqd1E7ZOeW_6(y6i}5GHjgq!|A5cm*OMTR13zdZ-9J4;qz-uIg{aWJGx`~-H zfQFoLL1=1cmH7T*D@KgO{4pnuLS%%u z2R|rjvmI2D{#D5SAd*ORF9_;-@Dl=!zY9$7|F6=lm)!ctrg6v{?g)^t_YjYO52Qy3 zGEZo(hm0tS*xU#nIK`ZU?O0*~Yzf6pf z>63R1hOVlJzV9ExU=V2H9mHO1&-Av14ek&mz}1C-yF^E%H$S=wiVw8CP8Y7AWuG#^ zrZ}L9AJN1?O$aa64&y$?33nhT()0vx0>fVmCU7jeu3zaW3||d={CcR5k>-gV9K+#- zVdF^P*_b|ms4r3qLIk;zB#?PXKPBm5)JyJUjz(NE01t)0Hp2CQ2_Fc`d-Zl8}FMN?SGU#ySkX~)dM%kCM1oT3!& zdqN#|9AK*a3QRP8beY+qfsp|AKJ#Z%SY8k@l&?D*&(s2~B^(6P!P8HtIh-BgDRbyu zr3j=+{u*5;PUnpL2Wi{KL(0v?z&l9O0pl)sa-q2W3F3zoeXnsh7J_11l183nF=xBS zM*M!;mUQ;@m0Y-bpb;6~N+-r(5Aa{GgNElEp!mchUQ&wpceFi%+^(Oln+9otAuQ>R z6H*G1{+uZ|5d*=Edbk76`$08)>&Za*u;)8eo z_50+v(aqteh#zKbZZaN_IdL3Fe!>pM)nTJ~fPjOiLmn;69V{u!sR?b*fEPJr%6ANo z)6|I_9KjTK0Vo%_Mkfpu<4L(lp;L6Dtuam3|E9wf2Pxt?*s;y58pcSxvqIr1ZdxWs zqwCBZ`a}%R0iUzstx^FnKu#A-jPRj%M}Z=N;jZcpP6*8F$929UQuMjZ#HY>SX8#1j zYH_1Xc`68W4u<%qIo$BE8YKF5m%KgZ#+{=G6r00akMP2&#kpYIl zuBq+{8XbNm#_O=1D44pAoK?RI34tRYNqX>){ey#OxYYc#rA(9{*v3oKUqO-Ef8!Y& zagdtI5!+bC+Lt&uhPa>9Y0$(7&i#6X2^9T3TjwpgAv}{fC&wF=*wdJ^{VUV�K;9 zU3cof(p8VLGq+9RcCbOs=(6oqwEDk-gA=w13T}@#Ut%gv2UtKQ8}9spk=dJH-k&M4 z7#J&QejX}TJ?^z@<0Hp@=@JK!-5BJb3uJSiCvJ7T0qL{U58|~|M{Odu62uwG22{d} zmBRs8dI#|j0t1zsG0;IF{dbp(qdJ@&V7C^Qgw8BzcHD&vtHlU#6R-=TW#bTVO#uGi z=I~n=Y0Ci+J$?IdBhht#-~pnBG(yex4BW(75k^=p(q;(|0}@btD-p05;EMIh>rCU0 z@ZuE=Th)et`I@ zACbmUE(8MT$@SaR6Z#D&5$y4!{;BH{MbPm@1%)P*+URVo0ATyKYTHR`nCi$)I)xgE zj`+gagwzQXx*{bC_-_a8IA2TOOEeEsLXs26&YN@KeAn`sSP)=E3F}rTIDbMArBD#` z?vBrWKi#FAi=3sVIDmgY|0*j{K!tF>dxLV#qijM-fW!b5uGctop4*!{D9s`4feCnb z$SD_D9-zX#&D1@XSK}1L!*d3td8GuuHU4La+m)t>@YEa1ioM)P9N@D^LoDU^T;l5Z z7Kt1oR^@v584u*^$h8wE?At`ZweDbbV;dD~u8L0^zu6}u&R|ww{`rjXY*L;{G0XIm zxLGz)8N?B66M6f7s~SbE@G4jTV#To*jHSq$q&NvdwCdWL&T(|&cDVWTF+!gzVw|{g zVd}|S(G?+t&%?a|%i%AVacGb<>VKFX}N6 zEB1Qt2S=MEJ`cOw5Z90pB}MU4$=G%)t~jj0kgYD1$VQJucsp&p^FHGW!q5Iq$*4$n zV7#`){#*I1>T==LX@?`e9Ui&rlml}EH~8{u=S-gTq~s5)P~2Tg5zyu^Hb%T}h-`@N z@{r>E@p*A7Xl9nee2P8LIJ>~?=0Q^}vz*nW66-+!>WWnXmOj~3Ik517FDU1SU=(W< 
zM-*2SkM{t|Y;j*#UoWilusMHj{ri%70px7n(|%%g>Dqw>uThcK*3$BdmcFm=uCr?G zsn=M|*M3eM?e%Q5)D-tFvXv_OZvVOATW)w`u|WE5N6)(S$aBFpyb6t3bQ0RCUbB@} zJl!dxpoM3nqJ;N`bDG%~kJzaDhmpIbsuQP|WP8>18AP(}%QZV5-MQH)A1MJ^w8eVd z+=qDQ{U;^O@v6S@ror?%+D%rDI%wxD znp5W`@|Jo4zN_j7T6#}cx3k6wATq!0-%maaBdF}yzbSaDyLkJS5u0Ik_4y?Z*m!6t zxSQf89_dG%Q=SRjS&yS#jvXv^ehxmOTC$uk`B81Qb{2NpKEL@qp*LrldFLVDJ!=Hu z1fCsN@)w>MWVhfeP~6;d16rc22Ahj{3uU8Qe3uH457ZCuVEYBeXZstrtNlhxSgrw3t2{ zUJZ!PBtgr8ou?C@7@qSfo$nm97Dx!iR?uD%%OfcMaqG1Os@2;{;=4-}VPyKOWU)N(9*r89zaq#ae`%1KyfEENrJAY({HW=#jpn=J!xS?US<8Hkt5S3+Kmh=T|637kVj_&nRJ`&Pw|Lr|{?_`JO&vxfSgm=l| zGFqs$*~_&)@2PuV{KS%dK2Y~Owu6srt#!-}H&)7h@%l*Ej%;{XaI@@BR Date: Thu, 21 Mar 2024 18:26:36 +0530 Subject: [PATCH 11/29] feat: Added Apply job in k8s plugin (#4828) * feat: Added Devtron CI Trigger Plugin v1.0.0 * feat: Added Apply JOB in k8s Plugin * modified structure * Added error handelling * Removed CI trigger plugin * Migration number changed * logo changed --- assets/devtron-logo-plugin.png | Bin 0 -> 10873 bytes scripts/sql/230_apply_job_ink8s.down.sql | 5 + scripts/sql/230_apply_job_ink8s_plugin.up.sql | 228 ++++++++++++++++++ 3 files changed, 233 insertions(+) create mode 100644 assets/devtron-logo-plugin.png create mode 100644 scripts/sql/230_apply_job_ink8s.down.sql create mode 100644 scripts/sql/230_apply_job_ink8s_plugin.up.sql diff --git a/assets/devtron-logo-plugin.png b/assets/devtron-logo-plugin.png new file mode 100644 index 0000000000000000000000000000000000000000..fc17baf72802401d58f7a778de5c79959d03e1ee GIT binary patch literal 10873 zcmdsd`9IWO^gm^?WgoI8OO_~0mXKwvA2D~CCj|+HTFGwLdHlY`_e?#kR|Jg zvXdku8QWxx^>ck5-ygnz!^h)+$Lrkf+#+#cOFfs5lP*G7a8Q#*pLq$bR zI{l?P2P6aE7ZSifdY@a?epFOmO{c%q-{)&D03o&C9RqEus$u?B@Im7SGl5Z2)uu8Y zIzp(Zt}q+w!tVV?y*{_#^}sTFVGB3%4x|2E)I_A?64HdH?;DNWW8-t79Cw4Cz1%&U zB#F#m=Z(`_rLIU9e5k9UEWfO@dQVvf+326UX6AGbcR+-$a+n`UtmmoG#Gl|H@BXG}|Sp^6NpPu@=erTrLms zxd!hwvc8z>y^5P0&aVwGTaibEzwNob`(8beN?g;J)l5{>yoG4bvGka!yLmV5#O{-i z1OZ!BQfWx>+Yuof_z^Y7M6S+$GM*uZtl7$~*4=zL#d*mb>VMB{O*FhW3^~kTF6R9G zvnW#XFC801;9G5t%7%vdM&nI?qi0*bY%Q%)QaL#gIy#8J(u&!y+>hcK_i3+mNe}eT zG|mUGwcUs&DVF9;|X&7yB}Q%b}BHKEBtM^8`K!_Z_=4LZoMdG z*dwJRP73Z+@xx>T?YRo2>CJt0i4qe(?;@vbLvIM*Gz?B7BJc5t`)9g2`eiw#Je>Mn zM#ZTQGf)YYQOleV7rsTR z<)k@IGzxJhsk8HU%fZ!TAG}P)1_XP_l)ydz_4^xceJ5ddu{m}$N&*P%^+ou9ru8+P zxyO2dm*Zcc%lU?+iF=;LMES{{@zdNeA1{~h`LmX4^i~c8E5V?8E=ReO4JX1CXxa8M zNK{L2bUCHt7rfa!s{V|w&^t~J+`{BjP2-`*632Th$pt$Zm;R_w4iy{ zHm7d)d=~BXW_gBF)5>lU|32KLnHSjIV$nCF_DY_7VfU4AV&06?;D>_)!7u$K0o1{0 zN#>P?MwONDftEnsPQ7)h9GU)_P$BQ8djowH=LcDEhTkt65>x+DpG9mk3!IfD)5LYx zz?)@$G3KSt#UE_>bkAPDTZNS|4BTtPDO3L!&!IY8iVH0f(t0}X;?CF|hLC}y`llt=p6U__4)?_o1~=tDibeJ% z25A%)y>TLboOr0aIa-V^)~sBX@;meSQ-2s`dpjn7!mK-#Us-FmTs9$iT!L^T@8pFO zg#ojO*=0Oiac0*zq38Z!>K>-;OC<6QDCR>BvvCOGSmXywRE-nv?>H*PF+&7OcS`*X zC;d`xoae^EXu$hsq-KlFof} z2Wq6Z8xIuteRg_@ovZ(Ha3>%ra+brsQ12NkC1bZPPoYzYSx0w$9-M*2mTckRRBDKT z%rbI#>P?K*IIqqV$$@l9dRpX|h!(8ZxN_b6S$LQJx;?`9L7V)UAkwgv)sO_i{qa=? 
z`n9tR+_nUo;NeG$9J zh%WHHXFTR{C;PAi)*)&4D*!{Tf#aQh!!ob}zOuVstM$|M6)m)J!nJ8v-SyFhsna`S zDiDEw3ifERL`t5SF>ip#@$YTa2JMvD!V6q3zWSG5EJyHpu#8rjJh%q6O-(l=3_1Sq zT^R7-)4nVGque^<;rW`Qi1>LPMm|x7ra&+y=-;Y}2VQ-R6c5k){{F3Q+njt;o!YR8 zq|+t1!#5JL<2A0k@mqK_#L7Kk?230(`34>Ob>einkE$l|LY`lGLd>_%_dR#BZ-%QJogM$W!M%P zt+o=Ul~ueOXdP&*@KD5d8ID(U1ZUEG3)^ZpFH|KFyy6w?3diGq469&RZ;j7;wZC-} znV}gCu>dtU;mxiiD#EOHz;cw;*2wo-O#(;l@q=$Mi8T`T@e^h@FezHELxND`4Ekhs zAD>UDt&w1bsJj>w>2tx*Ff2SoO|VtDGNlTsRA<2qN+9)}wyHgy`-GzxF|!G1bZi3X>3KG`wcvGaP5Od~!=9IoY#@}x zx}5BzyKX*=jPgjBo>}E#^bj>Xwm&VwgGGY-zz-k`Y*qQ$$(~8kl{5^&?qJ8cU7ng` zyD3y$R2SfzlGTQiUi`AuZS-Sa4jm`P)_m?X*bts!*HEfU=7Fe-TGw59hDZYnRixL} z=x#Xc!B`*zmaLIhVBX;(dD*o!o-Tag$k2f!!_8gKWidW)I%H)9S~Eh_X{@8zM1_}t zW0Clnqkc)L&hZwMRCc;f<{sEKzj+xTp@WjHEDftT{V3ke%B_Z6^>pcaCPA>sHUawg zv*D2DoUQd~mA64Vgkq=f(b@iQansF@6>0xW^~KyV$CoE8OW{+nNB#%zz`AP$JA{5^ zg{GkF^(K_#r;GUE2f3l7DzKa4Nzqp?LRt}_<6cZ~wajuh99d^XU#X`v`x(k!QQF~o zpziugm%b7y9%X-dRwXtVeD?f*pNg1W7;9@W3(x}`;`bs5LDTFxcBXAYn4hWaQ!oc0 z9S71^3w>fXr^d(i9Ays~QVkg{dXS6_gYJ3^1S{3aaRq{)u=E=kPa>MCGH%)+wzlR5 z1QM2S)pq6|_)JOmBs@oHn4W3?h7_(zW3uD!_-}74=>7tmmXshaV(UZte~qZ{)t`Sp zyH8RcUaqxVhPAyor}HC9;~JR8qwqQ&cu4KNCob0`WVm@DVf3!YI~E8IfWy5DpZ8Oo8N{q z=JaZNzU6jObT=cEBoEx>G#f6JmT&5kE^*m<`c-DDXCiXNJP{k#5;tFL$&u+S(`+aw z*!Lmi@8NxUa)<}A)l~B&r-z6b)-?~+|kw>DOt@C2XCvB zgsvl%_Tz40grJOItHVO5rsi_Yx1%9ynL)*F0fK1hSl< zI*pYjfx}_X;U-NSWy&mi;g#gYGyBV!-K+toj{mNPDqj_wO5e057qYl6r^k2%l@y_K z9Jf}Pql;MDUf5c4w^jUjMt{PJzxi@dkjjRII)fz-0DXx{BL z2VQ-n8G)(l)~u0g1BbZT#BAkG_@LPegSObj73;p#{FN|!jgEm~gE6Yy@&CN-FNcfF z=oa?*y`lM|@{<03{%P(S9ZC~$J!fx}aVs?Mvc(4D{-3aeFYl=pvYJ;FD-s2TFJrlKuHyE#q zc}oaG5n2TxVbJZZ*96`1dC8!=+B)M^#^9g|{I)sU;uD3^s94L>U02Vw{rtZ7m!YY< z$iw*QZ$W#*fyV(%g=x3jgo7jEQ5xq@sGl&wLzIKYhp#Ph$ZIBC7u8)yP)?nwMJ!4q z>G15Uu^@5GE;D8IX3f;G?t1WJK1O5G>tZy^m3F`3?JOd~AJ^dVz3bJw_p*kA&hnv^ z)ky%e;OGH3KQP~w7PY2nHA<|eNIm=coSFx8>;I_EVGtrSj-+Z$NkZg9iV=(Mx_~zS zjm$5K){n^kP@B<%UJQz&u$$}&CG9v-`Xv#-C2OzMZ?buM=v;7u=S{NyZz`c#U2&X@ z=75Kk{@9m}n=U4KAGc;5k(_%vV^UG!7eTHHH&C4eZk6uohxU(P7m8xoC>J7*BDoK8 zqh$V^b{}3Fx(o%rC$LCweUK?`CgQk(*t=H^OzQ?iaC@WQ`~cYGJl5hscCN#r1sgfq z1Pr;7P{nx{e~^YWeY(;TdD3Ba=c-?kfk>ZdseUKwKFR)MLM<3W{@prBd<&{M-r-$r zlFMEcSw|&J8(M9mJ`j$wq|2(14Exrg|FeE=YOFY&$adc;+w3_mriy~p$I{Y}BZXO& zjbxPt@v9%*Q?UZP3daR~(feE=1JQwLn)uAr_IDZ$F^5=N88XAwOs}h~E7#4riP%~t zp_{)SuGerOGKEv8Wf<(Y)9fEzG?ty6z>7=Y%v@AJ2I?NKnpujuuh@k{1GS>R zD;SQa?A&O|tLjPu*>#)~loVL0-$G+srgfjV>9((PVR}&>3zwjFrf7RQ0 z2S4M=LEXxD^=|UZ4|YBaQi^RcH`y$$3a&FbIJ-%7P#I2UNnP|lYNBj z)UTK%tF6GC=QWHrBSLi^gxz<7kDyw<@*BWLHuA^lCZ4OUEEbcgU2o-SB;3C){O^35 z{=(3 zr8AzwzBfiWIaqlY%4eNa!9+}WJ&&5VX_LQt;|mCi=l3UWD{qHK_FuAt86Ll1HeLC5 za;3Ka?;1OI0iU^YXW#RMRGu{GNVeyUR{zk%WXQW3bW|Te8pG zIB)(iE&e@c5S=i>fFAQCO06UA%Ue;|eTLw9I`JJ;;M%N;AU-EYc{Y_Clf(&EGh+7> znKcf#^z=E~K;k5YzG5rgVKxjlmTi>g+o^V_6w(@X@{Up)7K4&%m*#>l=;Cxj_EJJp zy?XDC=MrM;4i_ReOLrFN9t0?8-g=0@e32;}e5&ega8FG>Tz&Q55P6_Lc$tsqu6lBk zv2Dh7%X+vBOWwB^W>Fy;8}>3L^s|xvI|lSgRjAjKK??)VmJm!Ggu}~6T4JwKOEQTL zmV;rIA$^dM3!9qCnomm$Ru5tB)hge%$kJZ>3tr8lPBY#4eA};XIystu2^~M~U9f{* z-+H|EPwrPxCFZVh$e6_uS0T|yTeHUi=g<;nB5M;a>awV(zUdU+RpOc(^6A5ugR@Um zNDIx!371RBPo=$@Y+h8>ef|?S=NmuE$Ih(|QC~pXFR;DebCh_j+C}U1oJ%QZ&J1tD zqbSAqwBs_h(Ca%#@u4!!*PGs6sh2*es;TyMk{KLby;*+30!yr2+wNXw5tR*o^^^I} zd9546csk3**qrPKTLJWOQ(5pn@vl?1#CEdLi|*ONesW#qXyC2pldAZxs~>9>FH4+D z_F1num}p?X1oWWy1L?-LexoaF>!Q6)MplCiv&E!s^kJ1+mc#kowZO2Qw=TbZ<*Hkf zj2B+1)ivtzKZu&VU%cBj%|!p=b>Gqc?aMM*Se@uVbG<5na%!)Y5e#bbEMf9`AHB}CTz?H-C;h_J5#a})mB64 z(wci;Ceipi%4cuPuD(q%rm)%VS~O8#O|Lb%^`Aa=!8Ull$iQK+m3P3> z{G#Bul$8~)JDz?PvitxDydk+6b$3I9MA*~6F@F^!%9yv~oQw#IKy8%uKC}sAxrvKJnNo7o 
zeU~ydxEJVprTPOq3WwG8Ht;5IAai=_kNXS!GP??iW$$h3a~;@p+qW$*Cn48eG{<}- z?m$1z++qw)*1wS{-d_JesHa7SAh=f2+_zDntV^v2Q9rQ%S#K)BZ*=#I{uT1bQqJ-Z zPle6^=efJXPvF{cm~uyD6Q51`JUjY)CAL+_f&@^{8+KXk;Y+{x7VlrHOk@vc2p03@ z^cU+ax^0m-ojUNUn$dKk!q2-sGI5$yp;Mr=6fGFjkQ6QvZmD1%Q)YmR;ilXJ_*Bgb zCOL6>+3!iLusURJ!&s^Jk-Nw5oM0bS1D-fU*YEalphuCPNJ&+P{CCbK?nzz|K6uI% zgLRp1waA~+z=QV~-D$qUuzH1XfK!a`X#2eN1GU9aA~ii8NK>%w4-f(YC|M8``+g?Q zP)7+&XFn0Qf{NoEOY!15`#;w_}CY5o_%}y z^}Q_he_77RUpN~mpX5I-672>t0nXiOtCJZs%IHf)KgMo0J>d0~ZldTLT?2pb(4|($ zoQKJouitdDUl!8KB088Z)s{79jU>m1&n*vF{+zn?t{EMY`wf>iD4)S2!ZjZ<@#N}3 zxd8}Nb5=vh;pR~Xd{|_D)&b_+L7|*BND13y#0)iATYgpZu;Y+y1|qY*`;1M&UAt|~byIXu#r+Z3U zZORq1Qno4)^uYT3p5oDdCYEAT2@nzqCd=9uBr}M1e zaV~P97gcDEw@8i-&?HHFb@+mPN~>)C%eZ7066a0nx0WEh2DnkiwvqmXnvHhSvX&!^O(H zuXeQFvtNxzh3CJjt$Ac5CjZLaDQlze!=l2ieU~J#l0D74P9QnsgR~Za9)D`!8nkQoEetnV<)6aK`0J_foT2NphzP-++ zbWLd9()4am^F@R4`NV+gG!1Be0%|_-ZS!hUbh3^QIV*lzZExZ-0XUe8ZN-jc{UQlM zt1K6n5WSazMBcdSo(V#z^_nEpfaSg(Oq0pkkqyOKyc=bL5U(+;ypUa;e->OTfF^XE ziPOks>b!B&hZ#038ebNNgW4qsZeZ?#D1u!9YlDyV&aF-Myz$`L^zkZ&eSO%5!BZdO z-Z}r=C9fvoBCkcGJ47o%#|#_qw%B;=(E|y>k^7v9o3wP<e%sO zIIpB22dAEj-MclRuH5=Q;qCr{W$~lrz1MJeXqOMWChf-}j@-}v?dfNc zcHm)R*q03tdu#{R5Y&71N9;Vt@;vcT?YZzs?1M{0fvK{7qzrh+5xc&M`U328mz zq2dErJP!V@fBT^Sp8$M>*yQ~wz>r;FNyFAb`NYW4)Q5q0Q6Jpj87OM*OePvcoq%?w zjCNBkMfX*NCQG}rP}=$-x#uQF-cixNiJ_{4&reU3*Bp@Q8SjrPo^~&^ODzm|B?MG+ zG`fXzt+jpd{?di~01<(~1hdxiQg_bP|HyT<4ze}$*7{)s57EKXE};J;w8-qaYSQfq zAFsUCjF0$rd@{(e?>tx*y0zPjN-ILACcHf;(cM_OU^1@Z6{bPQpOb4yCLhn1&|R3@ zSm2mXW%;DH9}qJ#H8R{uT3TOdp|d;8)!h~O(a-)o!@dONQJwVNy0oF$dm&la z{>e_roaaScF)Kh+oTa5>Mk}hn3NYag-9+FajsSMrJoPIk1a3y#s_3DhrZ+`7b7>0S zd?w#XH_aw)Cl09?>U9anxYZTf|MCj%*v6`RN9D0=2}-5ggFL$SM$v_6fMTW&${ZBE z)TslB@Ra~ij5RRa$n)%ELtIP*)b zj&txDRd8M5D4iuUL+~h|LDu5thca)wKwrZ}5U~i#c2*1;3cyE~-1I3zU_2o#DsH;S z)u=Lek#}D?iIHix!lwi;~r;2Ki zGp{EdWs-H4%>1#HELT4Mf93t`;g#r@iPMjx_@o&28)bphN;Ct|v-R4rO0ti$-Y*T) zQ)?tQtyle>coa7u(x?0mP_E`u-ccbsviCtR@u2s<3CtOUwIRY1;%opDRPO2gqYtz^ zw0HopL4bAT2{M*1d8s-(&QP#+o;p;x2jk&FuJ$2%0095A0Q^ja2AF3hi21iSZd(B1 zv)Y>d)1}C+zQ+t{y~52{P77Bf)z7L_Ut|c*lI6M|TT$i6aLI?^fG64KTbDYi4DP3_ z{>-N>jvPzQA z?kjX!@IqP>F19pF_5AsdVc5mucLV9dM_@@?EV9pRq#fR=G6Y*t2zD;EC^u4S^gnpB zTvA)~qicwqk;hTxN~}`t$g4W2jXR{qe34=|AyukGafKzuwrzq5ui%^47l3zvdace zX}-MW;UK{I6@3Lk*RX8vQo~**R)7u7cFkX(_1ejCknqe`xaWhP=HMcxff)Itb_V+h zIXkkzheG>mv*cFQ)*!E_aAji!_N~b~NL$vM48f;|njcL1l6O?|o5ky*!BBrDh2-eU z{yEZp%Md^|S+!y4A$Dg?oE^)bDi;7caW7y020(&ruz*Z@>*6#xss>lXJVZ)O#U(rk zd>B}U`!1#cLUQ2;kW=EDKs_`A#Et{B@1Ex~Ce2q|FBjMcv%~}j0H#isE70fEq$arP z&tQL_5<%A8NlOw0UO-ia-0e^QU$gr24P5mD0IBu{q(3f?CnoFOFPg3_dWN4pS3k7P z*$R{qXgqWIk)`&8Lbr17RnOsO!D!%4u7N}ps|TAf8nCi*p3f1cVCP{C$p8 zQ;nVMN|Xl(Yn&cn5A_?l&GF8x$5xZ0frTf3$Usbs94-cwa5TuNx*l7lMXu}s;n`## zxgvD0_BX3Zp~EdCkm$C7CkJuoAnIZimWHV`tGOv5gn@2SdVCt;0^o?PcWa}1QXXoh zq)wlZ3#$MY#}FhkTk@&8;%AElq4sfdG}D>};t!_FQGMohrZmNf>Yr5`yA(rk67$z) znBK;SN7s|oozk7wzX0$Q_AS(Jya71jU%j15>rd#Pw|g}x{nAR(JSJkKelJ%!foM$p zHF1yaGN8A^*XOHwf}}0n)wC^_B- zjn*j(QVx4LrUF-Ua$1%fu>1kX`@UB`;(M1Qw4?)!Qq-i2k1JXmJYA^U(1ez;x3aJa zvg1UqNIP^GL;c_HHP3&bEP}stwBzV4p|dWmS%zNw zo3;qQDR{Yaw;bDU@l`D#ulH{k)K3t%aq{07=Z-bC)29MI|I_yo8sTQU)lGEaCJbto z&B$lDt*WGuPnySGMipQ;Mct!xkowQ+rEeA>7bFYY+?Du_Lwc0wc@93d+_%kM7P?S< z_=hYU8{+X-#eg3m9?CQX@NVar()oI_-47D9+wGt-LQV}h0py@I5zzI8+p)Xaja`c7mO-8)9tz>_`hG$*VKxylx z2jX;}4t}nCoFRdC74FhcO!{}61%B(bFNq)w?mZYMS{82;`AgAgk=u-b28p|+I-;nU zAB{@cl=d=OGXl;IAanftT{y+4Qb|9kpKh1Ix|W~>fi7{w(=3|&J1#QophejLq{egu z#1Gyrc^q2m7jUJkbyQ&(w1_mW>oGOP+*z1^7<5@9I_O~lZ7V7lSkZ$SJamuIp-Crw zpjrWMo~&+f`%&KmH1Ihc811wEzKwik8g43xfa4aUJrxe|SEct87~%GAF%>UmSxTW3 
zaK!#JJvFNA3?H#`qWQyePzKe`bb5#YL+%VHwSxLbE!TV)3h^g6ZS`f;cf zS;eaY9dv0!&VdOk3|cUQmxq#b5wd3%YlsUC@l){Au>)Q--EEp;LvR00aoH`ac<@^Y z8eN1Ic>rMcsdgi{K{QTycLL$P{EprHl86?;>oCx$E`FhLWH|B6y-1l~MBIy`B<++s zb9_;x%%2a(SqH2IoNd)|8qlXnOoE|!*R3zvyA6rwtvKDhz!XAA6Gh?*Gc;#nX8t4V=&_B??LuxO1F8{WjDy K)veNgi25J-N0`?D literal 0 HcmV?d00001 diff --git a/scripts/sql/230_apply_job_ink8s.down.sql b/scripts/sql/230_apply_job_ink8s.down.sql new file mode 100644 index 0000000000..23b6390e49 --- /dev/null +++ b/scripts/sql/230_apply_job_ink8s.down.sql @@ -0,0 +1,5 @@ +DELETE FROM plugin_step_variable WHERE plugin_step_id =(SELECT id FROM plugin_metadata WHERE name='Apply JOB in k8s v1.0.0'); +DELETE FROM plugin_step WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Apply JOB in k8s v1.0.0'); +DELETE FROM plugin_pipeline_script WHERE id=(SELECT id FROM plugin_metadata WHERE name='Apply JOB in k8s v1.0.0'); +DELETE FROM plugin_stage_mapping WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Apply JOB in k8s v1.0.0'); +DELETE FROM plugin_metadata WHERE name ='Apply JOB in k8s v1.0.0'; \ No newline at end of file diff --git a/scripts/sql/230_apply_job_ink8s_plugin.up.sql b/scripts/sql/230_apply_job_ink8s_plugin.up.sql new file mode 100644 index 0000000000..7a0954a2da --- /dev/null +++ b/scripts/sql/230_apply_job_ink8s_plugin.up.sql @@ -0,0 +1,228 @@ +INSERT INTO plugin_metadata (id,name,description,type,icon,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_metadata'),'Apply JOB in k8s v1.0.0','Apply custom jobs in k8s cluster.','PRESET','https://raw.githubusercontent.com/devtron-labs/devtron/main/assets/devtron-logo-plugin.png',false,'now()',1,'now()',1); + + +INSERT INTO plugin_stage_mapping (id,plugin_id,stage_type,created_on,created_by,updated_on,updated_by)VALUES (nextval('id_seq_plugin_stage_mapping'), +(SELECT id from plugin_metadata where name='Apply JOB in k8s v1.0.0'), 0,'now()',1,'now()',1); + + +INSERT INTO "plugin_pipeline_script" ("id", "script","type","deleted","created_on", "created_by", "updated_on", "updated_by") +VALUES ( nextval('id_seq_plugin_pipeline_script'), +E'#!/bin/sh +RUN_MIGRATION=$(echo $CI_CD_EVENT | jq -r \'.commonWorkflowRequest.extraEnvironmentVariables.RUN_MIGRATION\') +echo $RUN_MIGRATION +if [ "$RUN_MIGRATION" == "true" ]; then + # Configuration variables + NAMESPACE=$Namespace + NAME=$JobName + RUN_COMMAND=$RunCommand + BUILD_ARC=$BuildArch + SERVICE_ACCOUNT=$ServiceAccount + HEALTH_ENDPOINT=$HealthEndpoint + ENV_PATH=$EnvPath + JOB_TEMPLATE=$JobTemplatePath + MAX_ATTEMPTS=$MaxAttempts + SLEEP_TIME=$SleepTime + + if [ -z "$NAMESPACE" ];then + echo "Exiting due to Namespace not specified". + exit 1 + elif [ -z "$NAME" ];then + echo "Exiting due to JobName not specified". + exit 1 + elif [ -z "$RUN_COMMAND" ];then + echo "Exiting due to RunCommand not specified". + exit 1 + elif [ -z "$BUILD_ARC" ];then + echo "Exiting due to BuildArch not specified". + exit 1 + elif [ -z "$SERVICE_ACCOUNT" ];then + echo "Exiting due to ServiceAccount not specified". + exit 1 + elif [ -z "$HEALTH_ENDPOINT" ];then + echo "Exiting due to HealthEndpoint not specified". + exit 1 + elif [ -z "$ENV_PATH" ];then + echo "Exiting due to EnvPath not specified". + exit 1 + elif [ -z "$KubeConfig" ];then + echo "Exiting due to KubeConfig not specified". + exit 1 + fi + + if [ -z "$MAX_ATTEMPTS" ];then + echo "MaxAttempts not specified using the default one i.e. 
20" #Will set these values in SQL + fi + if [ -z "$SLEEP_TIME" ];then + echo "SleepTime not specified using the default one i.e. 15" #Will set these values in SQL + fi + + echo "Running migration job" + + # Devtron Config + cd /devtroncd + touch kubeconfig.yaml + touch kubeconfig.txt + echo $KubeConfig > kubeconfig.txt + cat kubeconfig.txt | base64 -d > kubeconfig.yaml + + # Get the system architecture + architecture=$(uname -m) + + # Check if the architecture is AMD or ARM + if [[ $architecture == "x86_64" || $architecture == "amd64" ]]; then + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + elif [[ $architecture == "aarch64" || $architecture == "arm64" ]]; then + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" + else + echo "Unknown system architecture: $architecture" + exit 1 + fi + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + + # Custom Variables + export tag=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerImageTag) + export repo=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerRepository) + export registry=$(echo $CI_CD_EVENT | jq --raw-output .commonWorkflowRequest.dockerRegistryURL) + + echo $registry/$repo:$tag + IMAGE_TAG=$registry/$repo:$tag + + + if [ $JobTemplateScoped ];then + echo "Using JOB template from scoped variable" + touch job-template.yaml + touch temp.txt + echo $JobTemplateScoped > temp.txt + cat temp.txt | base64 -d > job-template.yaml + else + if [ $JOB_TEMPLATE ];then + echo "Using external job template from repo" + touch job-template.yaml + echo "Path to jobtemplate: $JOB_TEMPLATE" + cat $JOB_TEMPLATE > job-template.yaml + else + echo "Using internal job template" + echo "No job template specified. 
Using the default" + touch jobtemplate.txt + touch job-template.yaml + default_job_template="YXBpVmVyc2lvbjogYmF0Y2gvdjEKa2luZDogSm9iCm1ldGFkYXRhOgogIG5hbWU6IFZBUi1KT0ItTkFNRS1SQU5ET00tU1RSSU5HCiAgbmFtZXNwYWNlOiBWQVItTkFNRVNQQUNFCnNwZWM6CiAgYmFja29mZkxpbWl0OiAwCiAgYWN0aXZlRGVhZGxpbmVTZWNvbmRzOiAxMDgwMAogIHRlbXBsYXRlOgogICAgc3BlYzoKICAgICAgYWZmaW5pdHk6CiAgICAgICAgbm9kZUFmZmluaXR5OgogICAgICAgICAgcmVxdWlyZWREdXJpbmdTY2hlZHVsaW5nSWdub3JlZER1cmluZ0V4ZWN1dGlvbjoKICAgICAgICAgICAgbm9kZVNlbGVjdG9yVGVybXM6CiAgICAgICAgICAgICAgLSBtYXRjaEV4cHJlc3Npb25zOgogICAgICAgICAgICAgICAgICAtIGtleTogbm9kZXR5cGUKICAgICAgICAgICAgICAgICAgICBvcGVyYXRvcjogSW4KICAgICAgICAgICAgICAgICAgICB2YWx1ZXM6CiAgICAgICAgICAgICAgICAgICAgICAtIFZBUi1CVUlMRC1BUkMKICAgICAgY29udGFpbmVyczoKICAgICAgLSBuYW1lOiBWQVItSk9CLU5BTUUKICAgICAgICBpbWFnZTogVkFSLUlNQUdFLVRBRwogICAgICAgIGFyZ3M6CiAgICAgICAgICAtIC9iaW4vc2gKICAgICAgICAgIC0gLWMKICAgICAgICAgIC0gVkFSLU1JR1JBVElPTi1SVU4tQ09NTUFORAogICAgICAgIGVudjoKICAgICAgICAgIC0gbmFtZTogRU5WX1BBVEgKICAgICAgICAgICAgdmFsdWU6IFZBUi1FTlYtUEFUSAogICAgICAgICAgLSBuYW1lOiBWQVVMVF9UT0tFTgogICAgICAgICAgICB2YWx1ZUZyb206CiAgICAgICAgICAgICAgc2VjcmV0S2V5UmVmOgogICAgICAgICAgICAgICAgbmFtZTogdmF1bHQtc2VjcmV0CiAgICAgICAgICAgICAgICBrZXk6IFZBVUxUX1RPS0VOCiAgICAgICAgICAtIG5hbWU6IFZBVUxUX1VSTAogICAgICAgICAgICB2YWx1ZUZyb206CiAgICAgICAgICAgICAgY29uZmlnTWFwS2V5UmVmOgogICAgICAgICAgICAgICAgbmFtZTogdmF1bHQtdXJsCiAgICAgICAgICAgICAgICBrZXk6IFZBVUxUX1VSTAogICAgICAgIHJlc291cmNlczoKICAgICAgICAgIGxpbWl0czoKICAgICAgICAgICAgY3B1OiAiMiIKICAgICAgICAgICAgbWVtb3J5OiA0R2kKICAgICAgICAgIHJlcXVlc3RzOgogICAgICAgICAgICBjcHU6ICIyIgogICAgICAgICAgICBtZW1vcnk6IDRHaQogICAgICByZXN0YXJ0UG9saWN5OiBOZXZlcgogICAgICBzZXJ2aWNlQWNjb3VudE5hbWU6IFZBUi1TRVJWSUNFLUFDQ09VTlQ=" + echo $default_job_template > jobtemplate.txt + cat jobtemplate.txt | base64 -d > job-template.yaml + fi + + fi + + set +o pipefail + RANDOM_STRING=$(openssl rand -base64 6 | tr -dc a-z | fold -w 4 | head -n 1) + set -o pipefail + + JOB_NAME="${NAME}-${RANDOM_STRING}" + + sed -i -e "s|VAR-JOB-NAME-RANDOM-STRING| ${JOB_NAME}|g" \\ + -e "s|VAR-JOB-NAME|${NAME}|g" \\ + -e "s|VAR-NAMESPACE|${NAMESPACE}|g" \\ + -e "s|VAR-BUILD-ARC|${BUILD_ARC}|g" \\ + -e "s|VAR-IMAGE-TAG|${IMAGE_TAG}|g" \\ + -e "s|VAR-ENV-PATH|${ENV_PATH}|g" \\ + -e "s|VAR-SERVICE-ACCOUNT|${SERVICE_ACCOUNT}|g" \\ + -e "s|VAR-MIGRATION-RUN-COMMAND|${RUN_COMMAND}|g" job-template.yaml + + FILE_PATH=job-template.yaml + cat job-template.yaml + + echo "Applying job YAML..." + kubectl apply -f "$FILE_PATH" --kubeconfig /devtroncd/kubeconfig.yaml + + echo "Waiting for the pod to be in the Running state..." + for ATTEMPT in $(seq 1 $MAX_ATTEMPTS); do + echo "Checking pod status, attempt $ATTEMPT of $MAX_ATTEMPTS..." + POD_NAME=$(kubectl get pods --kubeconfig /devtroncd/kubeconfig.yaml -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath=\'{.items[0].metadata.name}\') + if [ -z "$POD_NAME" ]; then + echo "Pod not found yet. Waiting..." + sleep $SLEEP_TIME + continue + fi + + POD_STATUS=$(kubectl get pod --kubeconfig /devtroncd/kubeconfig.yaml $POD_NAME -n $NAMESPACE -o jsonpath=\'{.status.phase}\') + if [ "$POD_STATUS" = "Running" ]; then + echo "Pod $POD_NAME is running." + POD_RUNNING=1 + break + else + echo "Pod $POD_NAME is not ready yet. Status: $POD_STATUS" + sleep $SLEEP_TIME + fi + done + + if [ "$POD_STATUS" != "Running" ]; then + echo "Pod did not reach running state within the allowed attempts." + exit 1 + fi + + # Perform health check + echo "Performing health check..." 
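  # The health check below is a simple polling protocol: resolve the IP of the job pod,
  # curl http://$POD_IPS$HEALTH_ENDPOINT up to $MAX_ATTEMPTS times, sleeping $SLEEP_TIME
  # seconds between attempts; HTTP 200 marks the pod healthy, 000 means the pod is not
  # reachable yet, and once the attempts are exhausted the job is deleted and the step
  # exits with a failure.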
+# Ensure the loop only starts if the pod is in a Running state + if [ $POD_RUNNING -eq 1 ]; then + POD_IPS=$(kubectl get pods --kubeconfig /devtroncd/kubeconfig.yaml -n $NAMESPACE --selector=job-name=$JOB_NAME -o jsonpath=\'{.items[*].status.podIP}\') + echo "Pod IPs: $POD_IPS" + + HEALTHY=0 + echo "Performing health check..." + for ATTEMPT in $(seq 1 $MAX_ATTEMPTS); do + FULL_URL="http://$POD_IPS$HEALTH_ENDPOINT" + echo "Checking URL: $FULL_URL" + STATUS=$(curl -s -o /dev/null -w "%{http_code}" "$FULL_URL") || true + echo "Attempt $ATTEMPT: Received status $STATUS" + + if [ "$STATUS" = "200" ]; then + echo "Pod health check PASSED" + HEALTHY=1 + break + elif [ "$STATUS" = "000" ]; then + echo "Attempt $ATTEMPT: Unable to connect to the pod. Retrying..." + else + echo "Attempt $ATTEMPT: Waiting for pod to become healthy... Status: $STATUS" + fi + sleep $SLEEP_TIME + done + + if [ $HEALTHY -ne 1 ]; then + echo "Pod health check FAILED after $MAX_ATTEMPTS attempts" + kubectl delete job $JOB_NAME -n $NAMESPACE --kubeconfig /devtroncd/kubeconfig.yaml + exit 1 + fi + else + echo "Pod did not reach healthy state within the allowed attempts." + exit 1 + fi + + + echo "Migration completed successfully." +else + echo "Skipping Migration" +fi +' +, + 'SHELL', + 'f', + 'now()', + 1, + 'now()', + 1 +); +INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Apply JOB in k8s v1.0.0'),'Step 1','Running the plugin','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'Namespace', 'STRING','The namespace where the JOB is to be applied.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'JobName', 'STRING','The name of the JOB to run','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'RunCommand', 'STRING','Run command for the JOB','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'BuildArch', 'STRING','Build architecture.', 't','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and 
ps.deleted=false),'ServiceAccount', 'STRING','Service account.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'HealthEndpoint', 'STRING','Health endpoint for health-check.', 't','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'EnvPath', 'STRING','Path of env variables.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'KubeConfig', 'STRING','base64 encoded kubeconfig thorugh scoped variable', 't','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'JobTemplateScoped','STRING','base64 encoded job template through scoped variable. Will use default if not provided.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'JobTemplatePath','STRING','Path of the JOB template.Will use default if not provided.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'MaxAttempts', 'NUMBER','Maximum attempts to check the JOB status.','t','t',20, null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Apply JOB in k8s v1.0.0' and ps."index"=1 and ps.deleted=false),'SleepTime', 'NUMBER','Time interval between each health check.','t','t',15, null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); \ No newline at end of file From c7d2add1f2d805a11dd27a44c3470037433f44ab Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Thu, 21 Mar 2024 18:26:46 +0530 Subject: [PATCH 12/29] feat: Information of Linked CI Pipelines on Parent CI Pipeline (#4786) * Refactor ArgoUserService.go by adding a TODO * wip * updated API spec * Update repo and service * Add logging of errors * modified API spec * added total count in query * updated service and rest handler layer * Update router and resthandler * set default req params at rest handler * moved generics to global util * Update resthandler * added tracing for new queries * fixed undefined ctx * Update handler's rbac * update error logs * Update cipipeline repo * added: comments for changes * Change handler * fixed typo and imports * fixed adapter and constants * updated test file errors and mock files * updated API specs * fixed API specs end points * fixed: query errors * fixed: linked cd 
condition * fixed: typo * fixed: search * fixed nil check for runner query * fixed: append in adapter * fix searchkey to lowercase * Fix duplicated env names * fix pass env array as empty * Fix error logs * fix error * fixed import --------- Co-authored-by: komalreddy3 --- api/appbean/AppDetail.go | 11 +- api/restHandler/CoreAppRestHandler.go | 11 +- .../configure/BuildPipelineRestHandler.go | 185 ++++++++--- .../DeploymentPipelineRestHandler.go | 68 ++-- .../configure/PipelineConfigRestHandler.go | 31 +- .../configure/PipelineConfigRouter.go | 3 + client/cron/CiTriggerCron.go | 2 +- .../telemetry/TelemetryEventClientExtended.go | 28 +- cmd/external-app/wire_gen.go | 2 +- .../pipelineConfig/CdWorfkflowRepository.go | 24 ++ .../pipelineConfig/CiPipelineRepository.go | 190 +++++++---- .../pipelineConfig/PipelineRepository.go | 17 + .../pipelineConfig/bean/CiPipelineBean.go | 15 + .../mocks/CiPipelineRepository.go | 295 +++++++++++++++++- pkg/appClone/batch/Mocks_test.go | 24 +- pkg/bean/app.go | 103 +++--- .../devtronApps/PreStageTriggerService.go | 31 +- pkg/pipeline/AppArtifactManager.go | 5 +- pkg/pipeline/BuildPipelineConfigService.go | 29 +- pkg/pipeline/BuildPipelineSwitchService.go | 2 +- pkg/pipeline/CdHandler.go | 76 +++-- pkg/pipeline/CiBuildConfigService.go | 18 +- pkg/pipeline/CiCdPipelineOrchestrator.go | 136 ++++++-- pkg/pipeline/CiLogService.go | 4 +- pkg/pipeline/CiService.go | 92 +++--- pkg/pipeline/CiTemplateService.go | 3 +- pkg/pipeline/CiTemplateService_test.go | 61 ++-- pkg/pipeline/WorkflowServiceIT_test.go | 21 +- pkg/pipeline/adapter/adapter.go | 75 +++-- .../bean/{ => CiPipeline}/CiBuildConfig.go | 14 +- .../bean/CiPipeline/SourceCiDownStream.go | 21 ++ pkg/pipeline/bean/CiTemplateBean.go | 8 +- pkg/pipeline/bean/pipelineStage.go | 6 + .../history/CiPipelineHistoryService_test.go | 5 +- .../history/ciTemplateHistoryService_test.go | 5 +- pkg/pipeline/mocks/CiBuildConfigService.go | 2 +- pkg/pipeline/types/Workflow.go | 3 +- pkg/workflow/dag/WorkflowDagExecutor.go | 3 +- .../downstream-linked-ci-view-spec.yaml | 185 +++++++++++ util/argo/ArgoUserService.go | 4 +- .../pagination/GenericPaginatedResponse.go | 61 ++++ wire_gen.go | 42 +-- 42 files changed, 1423 insertions(+), 498 deletions(-) create mode 100644 internal/sql/repository/pipelineConfig/bean/CiPipelineBean.go rename pkg/pipeline/bean/{ => CiPipeline}/CiBuildConfig.go (84%) create mode 100644 pkg/pipeline/bean/CiPipeline/SourceCiDownStream.go create mode 100644 specs/ci-pipeline/ciPipelineDownstream/downstream-linked-ci-view-spec.yaml create mode 100644 util/response/pagination/GenericPaginatedResponse.go diff --git a/api/appbean/AppDetail.go b/api/appbean/AppDetail.go index 0e46233476..9051141364 100644 --- a/api/appbean/AppDetail.go +++ b/api/appbean/AppDetail.go @@ -5,6 +5,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/chartRepo/repository" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" ) type AppDetail struct { @@ -45,11 +46,11 @@ type GitMaterial struct { } type DockerConfig struct { - DockerRegistry string `json:"dockerRegistry" validate:"required"` - DockerRepository string `json:"dockerRepository" validate:"required"` - CiBuildConfig *bean.CiBuildConfigBean `json:"ciBuildConfig"` - DockerBuildConfig *DockerBuildConfig `json:"dockerBuildConfig,omitempty"` // Deprecated, should use CiBuildConfig for development - CheckoutPath string `json:"checkoutPath"` + 
DockerRegistry string `json:"dockerRegistry" validate:"required"` + DockerRepository string `json:"dockerRepository" validate:"required"` + CiBuildConfig *CiPipeline.CiBuildConfigBean `json:"ciBuildConfig"` + DockerBuildConfig *DockerBuildConfig `json:"dockerBuildConfig,omitempty"` // Deprecated, should use CiBuildConfig for development + CheckoutPath string `json:"checkoutPath"` } type DockerBuildConfig struct { diff --git a/api/restHandler/CoreAppRestHandler.go b/api/restHandler/CoreAppRestHandler.go index 969c891307..3cc040d4e7 100644 --- a/api/restHandler/CoreAppRestHandler.go +++ b/api/restHandler/CoreAppRestHandler.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" app2 "github.com/devtron-labs/devtron/api/restHandler/app/pipeline/configure" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "net/http" "strconv" "strings" @@ -1322,9 +1323,9 @@ func (handler CoreAppRestHandlerImpl) createDockerConfig(appId int, dockerConfig dockerBuildConfig := dockerConfig.DockerBuildConfig if dockerBuildConfig != nil { dockerConfig.CheckoutPath = dockerBuildConfig.GitCheckoutPath - dockerConfig.CiBuildConfig = &bean2.CiBuildConfigBean{ - CiBuildType: bean2.SELF_DOCKERFILE_BUILD_TYPE, - DockerBuildConfig: &bean2.DockerBuildConfig{ + dockerConfig.CiBuildConfig = &CiPipeline.CiBuildConfigBean{ + CiBuildType: CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{ DockerfilePath: dockerBuildConfig.DockerfileRelativePath, DockerBuildOptions: dockerBuildConfig.DockerBuildOptions, Args: dockerBuildConfig.Args, @@ -1544,7 +1545,7 @@ func (handler CoreAppRestHandlerImpl) createWorkflows(ctx context.Context, appId //Creating CI pipeline starts ciPipeline, err := handler.createCiPipeline(appId, userId, workflowId, workflow.CiPipeline) if err != nil { - if err.Error() == bean2.PIPELINE_NAME_ALREADY_EXISTS_ERROR { + if err.Error() == CiPipeline.PIPELINE_NAME_ALREADY_EXISTS_ERROR { handler.logger.Errorw("service err, DeleteAppWorkflow ", "err", err) return err, http.StatusBadRequest } @@ -1672,7 +1673,7 @@ func (handler CoreAppRestHandlerImpl) createCiPipeline(appId int, userId int32, ParentCiPipeline: ciPipelineData.ParentCiPipeline, ParentAppId: ciPipelineData.ParentAppId, LinkedCount: ciPipelineData.LinkedCount, - PipelineType: bean2.PipelineType(ciPipelineData.PipelineType), + PipelineType: CiPipeline.PipelineType(ciPipelineData.PipelineType), }, } diff --git a/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go index 7117c76b0e..64f2ad9c53 100644 --- a/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go @@ -10,6 +10,10 @@ import ( "strconv" "strings" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" + "github.com/devtron-labs/devtron/util/response/pagination" + "github.com/gorilla/schema" + "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/client/gitSensor" @@ -64,6 +68,10 @@ type DevtronAppBuildRestHandler interface { GetCiPipelineByEnvironment(w http.ResponseWriter, r *http.Request) GetCiPipelineByEnvironmentMin(w http.ResponseWriter, r *http.Request) GetExternalCiByEnvironment(w http.ResponseWriter, r *http.Request) + // GetSourceCiDownStreamFilters will fetch the environments attached to all the linked CIs for the given ciPipelineId + GetSourceCiDownStreamFilters(w 
http.ResponseWriter, r *http.Request) + // GetSourceCiDownStreamInfo will fetch the deployment information of all the linked CIs for the given ciPipelineId + GetSourceCiDownStreamInfo(w http.ResponseWriter, r *http.Request) } type DevtronAppBuildMaterialRestHandler interface { @@ -89,7 +97,7 @@ type ImageTaggingRestHandler interface { GetImageTaggingData(w http.ResponseWriter, r *http.Request) } -func (handler PipelineConfigRestHandlerImpl) CreateCiConfig(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CreateCiConfig(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -139,7 +147,7 @@ func (handler PipelineConfigRestHandlerImpl) CreateCiConfig(w http.ResponseWrite common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) UpdateCiTemplate(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) UpdateCiTemplate(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -181,7 +189,7 @@ func (handler PipelineConfigRestHandlerImpl) UpdateCiTemplate(w http.ResponseWri common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) UpdateBranchCiPipelinesWithRegex(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) UpdateBranchCiPipelinesWithRegex(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -238,7 +246,7 @@ func (handler PipelineConfigRestHandlerImpl) UpdateBranchCiPipelinesWithRegex(w common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) parseSourceChangeRequest(w http.ResponseWriter, r *http.Request) (*bean.CiMaterialPatchRequest, int32, error) { +func (handler *PipelineConfigRestHandlerImpl) parseSourceChangeRequest(w http.ResponseWriter, r *http.Request) (*bean.CiMaterialPatchRequest, int32, error) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -256,7 +264,7 @@ func (handler PipelineConfigRestHandlerImpl) parseSourceChangeRequest(w http.Res return &patchRequest, userId, nil } -func (handler PipelineConfigRestHandlerImpl) parseBulkSourceChangeRequest(w http.ResponseWriter, r *http.Request) (*bean.CiMaterialBulkPatchRequest, int32, error) { +func (handler *PipelineConfigRestHandlerImpl) parseBulkSourceChangeRequest(w http.ResponseWriter, r *http.Request) (*bean.CiMaterialBulkPatchRequest, int32, error) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -279,7 +287,7 @@ func (handler PipelineConfigRestHandlerImpl) parseBulkSourceChangeRequest(w http return &patchRequest, userId, nil } -func (handler PipelineConfigRestHandlerImpl) authorizeCiSourceChangeRequest(w http.ResponseWriter, patchRequest *bean.CiMaterialPatchRequest, token string) error { +func (handler *PipelineConfigRestHandlerImpl) authorizeCiSourceChangeRequest(w http.ResponseWriter, patchRequest *bean.CiMaterialPatchRequest, token string) error { handler.Logger.Debugw("update request ", "req", patchRequest) app, err := handler.pipelineBuilder.GetApp(patchRequest.AppId) if err != nil { @@ 
-306,7 +314,7 @@ func (handler PipelineConfigRestHandlerImpl) authorizeCiSourceChangeRequest(w ht return nil } -func (handler PipelineConfigRestHandlerImpl) PatchCiMaterialSourceWithAppIdAndEnvironmentId(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) PatchCiMaterialSourceWithAppIdAndEnvironmentId(w http.ResponseWriter, r *http.Request) { patchRequest, userId, err := handler.parseSourceChangeRequest(w, r) if err != nil { handler.Logger.Errorw("Parse error, PatchCiMaterialSource", "err", err, "PatchCiMaterialSource", patchRequest) @@ -334,7 +342,7 @@ func (handler PipelineConfigRestHandlerImpl) PatchCiMaterialSourceWithAppIdAndEn common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) PatchCiMaterialSourceWithAppIdsAndEnvironmentId(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) PatchCiMaterialSourceWithAppIdsAndEnvironmentId(w http.ResponseWriter, r *http.Request) { bulkPatchRequest, userId, err := handler.parseBulkSourceChangeRequest(w, r) if err != nil { handler.Logger.Errorw("Parse error, PatchCiMaterialSource", "err", err, "PatchCiMaterialSource", bulkPatchRequest) @@ -351,7 +359,7 @@ func (handler PipelineConfigRestHandlerImpl) PatchCiMaterialSourceWithAppIdsAndE common.WriteJsonResp(w, err, bulkPatchResponse, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) PatchCiPipelines(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) PatchCiPipelines(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -409,8 +417,8 @@ func (handler PipelineConfigRestHandlerImpl) PatchCiPipelines(w http.ResponseWri ciConfigRequest := bean.CiConfigRequest{} ciConfigRequest.DockerRegistry = emptyDockerRegistry ciConfigRequest.AppId = patchRequest.AppId - ciConfigRequest.CiBuildConfig = &bean1.CiBuildConfigBean{} - ciConfigRequest.CiBuildConfig.CiBuildType = bean1.SKIP_BUILD_TYPE + ciConfigRequest.CiBuildConfig = &CiPipeline.CiBuildConfigBean{} + ciConfigRequest.CiBuildConfig.CiBuildType = CiPipeline.SKIP_BUILD_TYPE ciConfigRequest.UserId = patchRequest.UserId if patchRequest.CiPipeline == nil || patchRequest.CiPipeline.CiMaterial == nil { handler.Logger.Errorw("Invalid patch ci-pipeline request", "request", patchRequest, "err", "invalid CiPipeline data") @@ -431,7 +439,7 @@ func (handler PipelineConfigRestHandlerImpl) PatchCiPipelines(w http.ResponseWri } createResp, err := handler.pipelineBuilder.PatchCiPipeline(&patchRequest) if err != nil { - if err.Error() == bean1.PIPELINE_NAME_ALREADY_EXISTS_ERROR { + if err.Error() == CiPipeline.PIPELINE_NAME_ALREADY_EXISTS_ERROR { handler.Logger.Errorw("service err, pipeline name already exist ", "err", err) common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return @@ -446,7 +454,7 @@ func (handler PipelineConfigRestHandlerImpl) PatchCiPipelines(w http.ResponseWri common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCiPipeline(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCiPipeline(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) if err != nil { @@ -478,7 +486,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCiPipeline(w http.ResponseWriter common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler 
PipelineConfigRestHandlerImpl) GetExternalCi(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetExternalCi(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) if err != nil { @@ -507,7 +515,7 @@ func (handler PipelineConfigRestHandlerImpl) GetExternalCi(w http.ResponseWriter common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetExternalCiById(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetExternalCiById(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) if err != nil { @@ -542,7 +550,7 @@ func (handler PipelineConfigRestHandlerImpl) GetExternalCiById(w http.ResponseWr common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -638,7 +646,7 @@ func (handler PipelineConfigRestHandlerImpl) TriggerCiPipeline(w http.ResponseWr common.WriteJsonResp(w, err, response, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) FetchMaterials(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchMaterials(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -686,7 +694,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchMaterials(w http.ResponseWrite common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) FetchMaterialsByMaterialId(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchMaterialsByMaterialId(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -739,7 +747,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchMaterialsByMaterialId(w http.R common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) RefreshMaterials(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) RefreshMaterials(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -778,7 +786,7 @@ func (handler PipelineConfigRestHandlerImpl) RefreshMaterials(w http.ResponseWri common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCiPipelineMin(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCiPipelineMin(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -824,7 +832,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCiPipelineMin(w http.ResponseWri common.WriteJsonResp(w, err, ciPipelines, http.StatusOK) } -func (handler 
PipelineConfigRestHandlerImpl) DownloadCiWorkflowArtifacts(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) DownloadCiWorkflowArtifacts(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -878,7 +886,7 @@ func (handler PipelineConfigRestHandlerImpl) DownloadCiWorkflowArtifacts(w http. } } -func (handler PipelineConfigRestHandlerImpl) GetHistoricBuildLogs(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetHistoricBuildLogs(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1073,7 +1081,7 @@ func (handler *PipelineConfigRestHandlerImpl) GetBuildLogs(w http.ResponseWriter handler.streamOutput(w, logsReader, lastSeenMsgId) } -func (handler PipelineConfigRestHandlerImpl) FetchMaterialInfo(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchMaterialInfo(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1114,7 +1122,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchMaterialInfo(w http.ResponseWr common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCIPipelineById(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCIPipelineById(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -1172,7 +1180,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCIPipelineById(w http.ResponseWr common.WriteJsonResp(w, err, ciPipeline, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCIPipelineByPipelineId(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCIPipelineByPipelineId(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") var ciPipelineId int var err error @@ -1235,7 +1243,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCIPipelineByPipelineId(w http.Re common.WriteJsonResp(w, err, ciPipeline, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) CreateMaterial(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CreateMaterial(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -1288,7 +1296,7 @@ func (handler PipelineConfigRestHandlerImpl) CreateMaterial(w http.ResponseWrite common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) UpdateMaterial(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) UpdateMaterial(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -1339,7 +1347,7 @@ func (handler PipelineConfigRestHandlerImpl) UpdateMaterial(w http.ResponseWrite common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) DeleteMaterial(w http.ResponseWriter, r 
*http.Request) { +func (handler *PipelineConfigRestHandlerImpl) DeleteMaterial(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -1378,7 +1386,7 @@ func (handler PipelineConfigRestHandlerImpl) DeleteMaterial(w http.ResponseWrite common.WriteJsonResp(w, err, GIT_MATERIAL_DELETE_SUCCESS_RESP, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) HandleWorkflowWebhook(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) HandleWorkflowWebhook(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) var wfUpdateReq v1alpha1.WorkflowStatus err := decoder.Decode(&wfUpdateReq) @@ -1397,7 +1405,7 @@ func (handler PipelineConfigRestHandlerImpl) HandleWorkflowWebhook(w http.Respon common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) validForMultiMaterial(ciTriggerRequest bean.CiTriggerRequest) bool { +func (handler *PipelineConfigRestHandlerImpl) validForMultiMaterial(ciTriggerRequest bean.CiTriggerRequest) bool { if len(ciTriggerRequest.CiPipelineMaterial) > 1 { for _, m := range ciTriggerRequest.CiPipelineMaterial { if m.GitCommit.Commit == "" { @@ -1408,7 +1416,7 @@ func (handler PipelineConfigRestHandlerImpl) validForMultiMaterial(ciTriggerRequ return true } -func (handler PipelineConfigRestHandlerImpl) ValidateGitMaterialUrl(gitProviderId int, url string) (bool, error) { +func (handler *PipelineConfigRestHandlerImpl) ValidateGitMaterialUrl(gitProviderId int, url string) (bool, error) { gitProvider, err := handler.gitProviderRepo.FindOne(strconv.Itoa(gitProviderId)) if err != nil { return false, err @@ -1421,7 +1429,7 @@ func (handler PipelineConfigRestHandlerImpl) ValidateGitMaterialUrl(gitProviderI return hasPrefixResult, nil } -func (handler PipelineConfigRestHandlerImpl) CancelWorkflow(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CancelWorkflow(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1501,7 +1509,7 @@ func (handler PipelineConfigRestHandlerImpl) CancelWorkflow(w http.ResponseWrite } // FetchChanges FIXME check if deprecated -func (handler PipelineConfigRestHandlerImpl) FetchChanges(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchChanges(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1559,7 +1567,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchChanges(w http.ResponseWriter, common.WriteJsonResp(w, err, changes.Commits, http.StatusCreated) } -func (handler PipelineConfigRestHandlerImpl) GetCommitMetadataForPipelineMaterial(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCommitMetadataForPipelineMaterial(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1605,7 +1613,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCommitMetadataForPipelineMateria common.WriteJsonResp(w, err, commit, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) 
FetchWorkflowDetails(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchWorkflowDetails(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1656,7 +1664,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchWorkflowDetails(w http.Respons common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetArtifactsForCiJob(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetArtifactsForCiJob(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1701,7 +1709,7 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForCiJob(w http.Respons common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCiPipelineByEnvironment(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCiPipelineByEnvironment(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -1759,7 +1767,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCiPipelineByEnvironment(w http.R common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCiPipelineByEnvironmentMin(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCiPipelineByEnvironmentMin(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -1816,7 +1824,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCiPipelineByEnvironmentMin(w htt common.WriteJsonResp(w, err, results, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetExternalCiByEnvironment(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetExternalCiByEnvironment(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -1873,7 +1881,7 @@ func (handler PipelineConfigRestHandlerImpl) GetExternalCiByEnvironment(w http.R common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) CreateUpdateImageTagging(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CreateUpdateImageTagging(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -1966,7 +1974,7 @@ func (handler PipelineConfigRestHandlerImpl) CreateUpdateImageTagging(w http.Res common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetImageTaggingData(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetImageTaggingData(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -2013,7 +2021,7 @@ func (handler PipelineConfigRestHandlerImpl) GetImageTaggingData(w http.Response common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) 
extractCipipelineMetaForImageTags(artifactId int) (externalCi bool, ciPipelineId int, appId int, err error) { +func (handler *PipelineConfigRestHandlerImpl) extractCipipelineMetaForImageTags(artifactId int) (externalCi bool, ciPipelineId int, appId int, err error) { externalCi = false ciPipelineId = 0 appId = 0 @@ -2064,7 +2072,7 @@ func (handler PipelineConfigRestHandlerImpl) extractCipipelineMetaForImageTags(a return externalCi, ciPipelineId, appId, nil } -func (handler PipelineConfigRestHandlerImpl) checkAppSpecificAccess(token, action string, appId int) (bool, error) { +func (handler *PipelineConfigRestHandlerImpl) checkAppSpecificAccess(token, action string, appId int) (bool, error) { app, err := handler.pipelineBuilder.GetApp(appId) if err != nil { return false, err @@ -2079,3 +2087,94 @@ func (handler PipelineConfigRestHandlerImpl) checkAppSpecificAccess(token, actio } return true, nil } + +func (handler *PipelineConfigRestHandlerImpl) GetSourceCiDownStreamFilters(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userAuthService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + vars := mux.Vars(r) + ciPipelineId, err := strconv.Atoi(vars["ciPipelineId"]) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + ciPipeline, err := handler.ciPipelineRepository.FindOneWithAppData(ciPipelineId) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, fmt.Errorf("invalid CiPipelineId %d", ciPipelineId), nil, http.StatusBadRequest) + return + } else if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + token := r.Header.Get("token") + // RBAC enforcer applying + resourceName := handler.enforcerUtil.GetAppRBACName(ciPipeline.App.AppName) + if ok := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionGet, resourceName); !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden) + return + } + // RBAC enforcer Ends + resp, err := handler.ciCdPipelineOrchestrator.GetSourceCiDownStreamFilters(r.Context(), ciPipelineId) + if err != nil { + common.WriteJsonResp(w, fmt.Errorf("error getting environment info for given source Ci pipeline id"), "error getting environment info for given source Ci pipeline id", http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, err, resp, http.StatusOK) +} + +func (handler *PipelineConfigRestHandlerImpl) GetSourceCiDownStreamInfo(w http.ResponseWriter, r *http.Request) { + decoder := schema.NewDecoder() + userId, err := handler.userAuthService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + vars := mux.Vars(r) + ciPipelineId, err := strconv.Atoi(vars["ciPipelineId"]) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + req := &CiPipeline.SourceCiDownStreamFilters{} + err = decoder.Decode(req, r.URL.Query()) + if err != nil { + handler.Logger.Errorw("request err, GetSourceCiDownStreamInfo", "err", err, "payload", req) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + // Convert searchKey to lowercase + req.SearchKey = strings.ToLower(req.SearchKey) + req.SortBy = pagination.AppName + if req.Size == 0 { + req.Size = 20 + } + if len(req.SortOrder) == 0 { + req.SortOrder = pagination.Asc + } + token := r.Header.Get("token") + ciPipeline, err := 
handler.ciPipelineRepository.FindOneWithAppData(ciPipelineId) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, fmt.Errorf("invalid CiPipelineId %d", ciPipelineId), nil, http.StatusBadRequest) + return + } else if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + // RBAC enforcer applying + resourceName := handler.enforcerUtil.GetAppRBACName(ciPipeline.App.AppName) + if ok := handler.enforcer.Enforce(token, casbin.ResourceApplications, casbin.ActionGet, resourceName); !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden) + return + } + // RBAC enforcer Ends + linkedCIDetails, err := handler.ciCdPipelineOrchestrator.GetSourceCiDownStreamInfo(r.Context(), ciPipelineId, req) + if err != nil { + handler.Logger.Errorw("service err, PatchCiPipelines", "err", err, "ciPipelineId", ciPipelineId) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, err, linkedCIDetails, http.StatusOK) +} diff --git a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go index 353e3b8af3..74ec713a6a 100644 --- a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go @@ -98,7 +98,7 @@ type DevtronAppDeploymentHistoryRestHandler interface { DownloadArtifacts(w http.ResponseWriter, r *http.Request) } -func (handler PipelineConfigRestHandlerImpl) ConfigureDeploymentTemplateForApp(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) ConfigureDeploymentTemplateForApp(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -169,7 +169,7 @@ func (handler PipelineConfigRestHandlerImpl) ConfigureDeploymentTemplateForApp(w common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) CreateCdPipeline(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CreateCdPipeline(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -240,7 +240,7 @@ func (handler PipelineConfigRestHandlerImpl) CreateCdPipeline(w http.ResponseWri common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) PatchCdPipeline(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) PatchCdPipeline(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -338,7 +338,7 @@ func (handler PipelineConfigRestHandlerImpl) PatchCdPipeline(w http.ResponseWrit } // HandleChangeDeploymentRequest changes the deployment app type for all pipelines in all apps for a given environment. 
-func (handler PipelineConfigRestHandlerImpl) HandleChangeDeploymentRequest(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) HandleChangeDeploymentRequest(w http.ResponseWriter, r *http.Request) { // Auth check userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -401,7 +401,7 @@ func (handler PipelineConfigRestHandlerImpl) HandleChangeDeploymentRequest(w htt return } -func (handler PipelineConfigRestHandlerImpl) HandleChangeDeploymentTypeRequest(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) HandleChangeDeploymentTypeRequest(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -456,7 +456,7 @@ func (handler PipelineConfigRestHandlerImpl) HandleChangeDeploymentTypeRequest(w return } -func (handler PipelineConfigRestHandlerImpl) HandleTriggerDeploymentAfterTypeChange(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) HandleTriggerDeploymentAfterTypeChange(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -516,7 +516,7 @@ func (handler PipelineConfigRestHandlerImpl) HandleTriggerDeploymentAfterTypeCha return } -func (handler PipelineConfigRestHandlerImpl) ChangeChartRef(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) ChangeChartRef(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -666,7 +666,7 @@ func (handler PipelineConfigRestHandlerImpl) ChangeChartRef(w http.ResponseWrite common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideCreate(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) EnvConfigOverrideCreate(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -773,7 +773,7 @@ func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideCreate(w http.Resp common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideUpdate(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) EnvConfigOverrideUpdate(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) //userId := getLoggedInUser(r) userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -838,7 +838,7 @@ func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideUpdate(w http.Resp common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetEnvConfigOverride(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetEnvConfigOverride(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) environmentId, err := strconv.Atoi(vars["environmentId"]) if err != nil { @@ -885,7 +885,7 @@ func (handler PipelineConfigRestHandlerImpl) GetEnvConfigOverride(w http.Respons common.WriteJsonResp(w, err, env, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetTemplateComparisonMetadata(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) 
GetTemplateComparisonMetadata(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") appId, err := strconv.Atoi(vars["appId"]) @@ -918,7 +918,7 @@ func (handler PipelineConfigRestHandlerImpl) GetTemplateComparisonMetadata(w htt common.WriteJsonResp(w, nil, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) var request generateManifest.DeploymentTemplateRequest @@ -959,7 +959,7 @@ func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplateData(w http.Re common.WriteJsonResp(w, nil, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetDeploymentTemplate(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetDeploymentTemplate(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) if err != nil { @@ -1093,7 +1093,7 @@ func (handler *PipelineConfigRestHandlerImpl) GetDefaultDeploymentTemplate(w htt common.WriteJsonResp(w, nil, defaultTemplate, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCdPipelines(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCdPipelines(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) if err != nil { @@ -1125,7 +1125,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCdPipelines(w http.ResponseWrite common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCdPipelinesForAppAndEnv(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCdPipelinesForAppAndEnv(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) if err != nil { @@ -1170,7 +1170,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCdPipelinesForAppAndEnv(w http.R common.WriteJsonResp(w, err, cdPipelines, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) cdPipelineId, err := strconv.Atoi(vars["cd_pipeline_id"]) @@ -1181,7 +1181,7 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res } stage := r.URL.Query().Get("stage") if len(stage) == 0 { - stage = pipeline.WorklowTypePre + stage = pipelineBean.WorkflowTypePre } searchString := "" search := r.URL.Query().Get("search") @@ -1338,7 +1338,7 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsByCDPipeline(w http.Res common.WriteJsonResp(w, err, ciArtifactResponse, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetAppOverrideForDefaultTemplate(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetAppOverrideForDefaultTemplate(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -1371,7 +1371,7 @@ func (handler PipelineConfigRestHandlerImpl) GetAppOverrideForDefaultTemplate(w common.WriteJsonResp(w, err, appOverride, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) UpdateAppOverride(w http.ResponseWriter, r 
*http.Request) { +func (handler *PipelineConfigRestHandlerImpl) UpdateAppOverride(w http.ResponseWriter, r *http.Request) { decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -1436,7 +1436,7 @@ func (handler PipelineConfigRestHandlerImpl) UpdateAppOverride(w http.ResponseWr common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) cdPipelineId, err := strconv.Atoi(vars["cd_pipeline_id"]) if err != nil { @@ -1520,7 +1520,7 @@ func (handler PipelineConfigRestHandlerImpl) GetArtifactsForRollback(w http.Resp common.WriteJsonResp(w, err, ciArtifactResponse, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideReset(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) EnvConfigOverrideReset(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1715,7 +1715,7 @@ func (handler *PipelineConfigRestHandlerImpl) GetPrePostDeploymentLogs(w http.Re handler.streamOutput(w, logsReader, lastSeenMsgId) } -func (handler PipelineConfigRestHandlerImpl) FetchCdWorkflowDetails(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchCdWorkflowDetails(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1767,7 +1767,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchCdWorkflowDetails(w http.Respo common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) DownloadArtifacts(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) DownloadArtifacts(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1827,7 +1827,7 @@ func (handler PipelineConfigRestHandlerImpl) DownloadArtifacts(w http.ResponseWr } } -func (handler PipelineConfigRestHandlerImpl) GetStageStatus(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetStageStatus(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1874,7 +1874,7 @@ func (handler PipelineConfigRestHandlerImpl) GetStageStatus(w http.ResponseWrite common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetConfigmapSecretsForDeploymentStages(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetConfigmapSecretsForDeploymentStages(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1902,7 +1902,7 @@ func (handler PipelineConfigRestHandlerImpl) GetConfigmapSecretsForDeploymentSta common.WriteJsonResp(w, err, resp, 
http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCdPipelineById(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCdPipelineById(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -1952,7 +1952,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCdPipelineById(w http.ResponseWr common.WriteJsonResp(w, err, cdResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) CancelStage(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CancelStage(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -1998,7 +1998,7 @@ func (handler PipelineConfigRestHandlerImpl) CancelStage(w http.ResponseWriter, common.WriteJsonResp(w, err, resp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetDeploymentPipelineStrategy(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetDeploymentPipelineStrategy(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -2024,7 +2024,7 @@ func (handler PipelineConfigRestHandlerImpl) GetDeploymentPipelineStrategy(w htt common.WriteJsonResp(w, err, result, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetDefaultDeploymentPipelineStrategy(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetDefaultDeploymentPipelineStrategy(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -2056,7 +2056,7 @@ func (handler PipelineConfigRestHandlerImpl) GetDefaultDeploymentPipelineStrateg common.WriteJsonResp(w, err, result, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideCreateNamespace(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) EnvConfigOverrideCreateNamespace(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -2108,7 +2108,7 @@ func (handler PipelineConfigRestHandlerImpl) EnvConfigOverrideCreateNamespace(w common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) IsReadyToTrigger(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) IsReadyToTrigger(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -2149,7 +2149,7 @@ func (handler PipelineConfigRestHandlerImpl) IsReadyToTrigger(w http.ResponseWri common.WriteJsonResp(w, err, result, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) UpgradeForAllApps(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) UpgradeForAllApps(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -2259,7 +2259,7 @@ func (handler PipelineConfigRestHandlerImpl) UpgradeForAllApps(w http.ResponseWr common.WriteJsonResp(w, err, response, http.StatusOK) } 
-func (handler PipelineConfigRestHandlerImpl) GetCdPipelinesByEnvironment(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCdPipelinesByEnvironment(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -2317,7 +2317,7 @@ func (handler PipelineConfigRestHandlerImpl) GetCdPipelinesByEnvironment(w http. common.WriteJsonResp(w, err, results, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetCdPipelinesByEnvironmentMin(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetCdPipelinesByEnvironmentMin(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { diff --git a/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go b/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go index 4e7303c03d..43677baf7c 100644 --- a/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go +++ b/api/restHandler/app/pipeline/configure/PipelineConfigRestHandler.go @@ -26,6 +26,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/manifest/deployedAppMetrics" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate" "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef" + bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "io" "net/http" "strconv" @@ -39,7 +40,6 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/chart" "github.com/devtron-labs/devtron/pkg/generateManifest" - bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" resourceGroup2 "github.com/devtron-labs/devtron/pkg/resourceGroup" "github.com/devtron-labs/devtron/util/argo" "github.com/go-pg/pg" @@ -129,6 +129,7 @@ type PipelineConfigRestHandlerImpl struct { ciArtifactRepository repository.CiArtifactRepository deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService chartRefService chartRef.ChartRefService + ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator } func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger *zap.SugaredLogger, @@ -155,7 +156,8 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger imageTaggingService pipeline.ImageTaggingService, ciArtifactRepository repository.CiArtifactRepository, deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, - chartRefService chartRef.ChartRefService) *PipelineConfigRestHandlerImpl { + chartRefService chartRef.ChartRefService, + ciCdPipelineOrchestrator pipeline.CiCdPipelineOrchestrator) *PipelineConfigRestHandlerImpl { envConfig := &PipelineRestHandlerEnvConfig{} err := env.Parse(envConfig) if err != nil { @@ -193,6 +195,7 @@ func NewPipelineRestHandlerImpl(pipelineBuilder pipeline.PipelineBuilder, Logger ciArtifactRepository: ciArtifactRepository, deployedAppMetricsService: deployedAppMetricsService, chartRefService: chartRefService, + ciCdPipelineOrchestrator: ciCdPipelineOrchestrator, } } @@ -202,7 +205,7 @@ const ( HTTPS_URL_PREFIX = "https://" ) -func (handler PipelineConfigRestHandlerImpl) DeleteApp(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) DeleteApp(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil 
{ @@ -244,7 +247,7 @@ func (handler PipelineConfigRestHandlerImpl) DeleteApp(w http.ResponseWriter, r common.WriteJsonResp(w, err, nil, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) DeleteACDAppWithNonCascade(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) DeleteACDAppWithNonCascade(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -324,7 +327,7 @@ func (handler PipelineConfigRestHandlerImpl) DeleteACDAppWithNonCascade(w http.R common.WriteJsonResp(w, err, nil, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) CreateApp(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) CreateApp(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") decoder := json.NewDecoder(r.Body) userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -400,7 +403,7 @@ func (handler PipelineConfigRestHandlerImpl) CreateApp(w http.ResponseWriter, r common.WriteJsonResp(w, err, createResp, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetApp(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetApp(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -429,7 +432,7 @@ func (handler PipelineConfigRestHandlerImpl) GetApp(w http.ResponseWriter, r *ht common.WriteJsonResp(w, err, ciConf, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) FindAppsByTeamId(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FindAppsByTeamId(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) teamId, err := strconv.Atoi(vars["teamId"]) if err != nil { @@ -447,7 +450,7 @@ func (handler PipelineConfigRestHandlerImpl) FindAppsByTeamId(w http.ResponseWri common.WriteJsonResp(w, err, project, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) FindAppsByTeamName(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FindAppsByTeamName(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) teamName := vars["teamName"] handler.Logger.Infow("request payload, FindAppsByTeamName", "teamName", teamName) @@ -564,7 +567,7 @@ func (handler *PipelineConfigRestHandlerImpl) sendData(event []byte, w http.Resp } } -func (handler PipelineConfigRestHandlerImpl) FetchAppWorkflowStatusForTriggerView(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchAppWorkflowStatusForTriggerView(w http.ResponseWriter, r *http.Request) { userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) @@ -637,7 +640,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchAppWorkflowStatusForTriggerVie common.WriteJsonResp(w, err, triggerWorkflowStatus, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) PipelineNameSuggestion(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) PipelineNameSuggestion(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") vars := mux.Vars(r) appId, err := strconv.Atoi(vars["appId"]) @@ -663,7 +666,7 @@ func (handler PipelineConfigRestHandlerImpl) PipelineNameSuggestion(w http.Respo common.WriteJsonResp(w, err, suggestedName, http.StatusOK) } -func (handler 
PipelineConfigRestHandlerImpl) FetchAppWorkflowStatusForTriggerViewByEnvironment(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchAppWorkflowStatusForTriggerViewByEnvironment(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -741,7 +744,7 @@ func (handler PipelineConfigRestHandlerImpl) FetchAppWorkflowStatusForTriggerVie common.WriteJsonResp(w, err, triggerWorkflowStatus, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetEnvironmentListWithAppData(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetEnvironmentListWithAppData(w http.ResponseWriter, r *http.Request) { v := r.URL.Query() token := r.Header.Get("token") envName := v.Get("envName") @@ -779,7 +782,7 @@ func (handler PipelineConfigRestHandlerImpl) GetEnvironmentListWithAppData(w htt common.WriteJsonResp(w, err, result, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) GetApplicationsByEnvironment(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) GetApplicationsByEnvironment(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) @@ -834,7 +837,7 @@ func (handler PipelineConfigRestHandlerImpl) GetApplicationsByEnvironment(w http common.WriteJsonResp(w, err, results, http.StatusOK) } -func (handler PipelineConfigRestHandlerImpl) FetchAppDeploymentStatusForEnvironments(w http.ResponseWriter, r *http.Request) { +func (handler *PipelineConfigRestHandlerImpl) FetchAppDeploymentStatusForEnvironments(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("token") userId, err := handler.userAuthService.GetLoggedInUser(r) if userId == 0 || err != nil { diff --git a/api/router/app/pipeline/configure/PipelineConfigRouter.go b/api/router/app/pipeline/configure/PipelineConfigRouter.go index 9b23629739..23cfe6aeed 100644 --- a/api/router/app/pipeline/configure/PipelineConfigRouter.go +++ b/api/router/app/pipeline/configure/PipelineConfigRouter.go @@ -85,6 +85,9 @@ func (router PipelineConfigRouterImpl) InitPipelineConfigRouter(configRouter *mu configRouter.Path("/ci-pipeline/bulk/branch-update").HandlerFunc(router.restHandler.PatchCiMaterialSourceWithAppIdsAndEnvironmentId).Methods("PUT") configRouter.Path("/ci-pipeline/patch/regex").HandlerFunc(router.restHandler.UpdateBranchCiPipelinesWithRegex).Methods("POST") + configRouter.Path("/ci-pipeline/{ciPipelineId}/linked-ci/downstream/env").HandlerFunc(router.restHandler.GetSourceCiDownStreamFilters).Methods("GET") + configRouter.Path("/ci-pipeline/{ciPipelineId}/linked-ci/downstream/cd").HandlerFunc(router.restHandler.GetSourceCiDownStreamInfo).Methods("GET") + configRouter.Path("/cd-pipeline/{cd_pipeline_id}/material").HandlerFunc(router.restHandler.GetArtifactsByCDPipeline).Methods("GET") configRouter.Path("/cd-pipeline/{cd_pipeline_id}/material/rollback").HandlerFunc(router.restHandler.GetArtifactsForRollback).Methods("GET") diff --git a/client/cron/CiTriggerCron.go b/client/cron/CiTriggerCron.go index cc56379b8a..ea4b4035a7 100644 --- a/client/cron/CiTriggerCron.go +++ b/client/cron/CiTriggerCron.go @@ -6,7 +6,7 @@ import ( repository2 "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline" - pipelineConfigBean 
"github.com/devtron-labs/devtron/pkg/pipeline/bean" + pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/repository" repository3 "github.com/devtron-labs/devtron/pkg/plugin/repository" cron2 "github.com/devtron-labs/devtron/util/cron" diff --git a/client/telemetry/TelemetryEventClientExtended.go b/client/telemetry/TelemetryEventClientExtended.go index 2a8e725546..d80134097a 100644 --- a/client/telemetry/TelemetryEventClientExtended.go +++ b/client/telemetry/TelemetryEventClientExtended.go @@ -5,6 +5,7 @@ import ( cloudProviderIdentifier "github.com/devtron-labs/common-lib/cloud-provider-identifier" client "github.com/devtron-labs/devtron/api/helm-app/gRPC" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/config" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" cron3 "github.com/devtron-labs/devtron/util/cron" "net/http" "time" @@ -21,7 +22,6 @@ import ( "github.com/devtron-labs/devtron/pkg/cluster" moduleRepo "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/pipeline" - "github.com/devtron-labs/devtron/pkg/pipeline/bean" serverDataStore "github.com/devtron-labs/devtron/pkg/server/store" util3 "github.com/devtron-labs/devtron/pkg/util" "github.com/devtron-labs/devtron/util" @@ -345,16 +345,16 @@ func (impl *TelemetryEventClientImplExtended) SendSummaryEvent(eventType string) } payload.SelfDockerfileCount = selfDockerfileCount - payload.SelfDockerfileSuccessCount = successCount[bean.SELF_DOCKERFILE_BUILD_TYPE] - payload.SelfDockerfileFailureCount = failureCount[bean.SELF_DOCKERFILE_BUILD_TYPE] + payload.SelfDockerfileSuccessCount = successCount[CiPipeline.SELF_DOCKERFILE_BUILD_TYPE] + payload.SelfDockerfileFailureCount = failureCount[CiPipeline.SELF_DOCKERFILE_BUILD_TYPE] payload.ManagedDockerfileCount = managedDockerfileCount - payload.ManagedDockerfileSuccessCount = successCount[bean.MANAGED_DOCKERFILE_BUILD_TYPE] - payload.ManagedDockerfileFailureCount = failureCount[bean.MANAGED_DOCKERFILE_BUILD_TYPE] + payload.ManagedDockerfileSuccessCount = successCount[CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE] + payload.ManagedDockerfileFailureCount = failureCount[CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE] payload.BuildPackCount = buildpackCount - payload.BuildPackSuccessCount = successCount[bean.BUILDPACK_BUILD_TYPE] - payload.BuildPackFailureCount = failureCount[bean.BUILDPACK_BUILD_TYPE] + payload.BuildPackSuccessCount = successCount[CiPipeline.BUILDPACK_BUILD_TYPE] + payload.BuildPackFailureCount = failureCount[CiPipeline.BUILDPACK_BUILD_TYPE] reqBody, err := json.Marshal(payload) if err != nil { @@ -378,24 +378,24 @@ func (impl *TelemetryEventClientImplExtended) SendSummaryEvent(eventType string) func (impl *TelemetryEventClientImplExtended) getCiBuildTypeData() (int, int, int) { countByBuildType := impl.ciBuildConfigService.GetCountByBuildType() - return countByBuildType[bean.SELF_DOCKERFILE_BUILD_TYPE], countByBuildType[bean.MANAGED_DOCKERFILE_BUILD_TYPE], countByBuildType[bean.BUILDPACK_BUILD_TYPE] + return countByBuildType[CiPipeline.SELF_DOCKERFILE_BUILD_TYPE], countByBuildType[CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE], countByBuildType[CiPipeline.BUILDPACK_BUILD_TYPE] } -func (impl *TelemetryEventClientImplExtended) getCiBuildTypeVsStatusVsCount() (successCount map[bean.CiBuildType]int, failureCount map[bean.CiBuildType]int) { - successCount = make(map[bean.CiBuildType]int) - failureCount = make(map[bean.CiBuildType]int) +func (impl 
*TelemetryEventClientImplExtended) getCiBuildTypeVsStatusVsCount() (successCount map[CiPipeline.CiBuildType]int, failureCount map[CiPipeline.CiBuildType]int) { + successCount = make(map[CiPipeline.CiBuildType]int) + failureCount = make(map[CiPipeline.CiBuildType]int) buildTypeAndStatusVsCount := impl.ciWorkflowRepository.FindBuildTypeAndStatusDataOfLast1Day() for _, buildTypeCount := range buildTypeAndStatusVsCount { if buildTypeCount == nil { continue } if buildTypeCount.Type == "" { - buildTypeCount.Type = string(bean.SELF_DOCKERFILE_BUILD_TYPE) + buildTypeCount.Type = string(CiPipeline.SELF_DOCKERFILE_BUILD_TYPE) } if buildTypeCount.Status == "Succeeded" { - successCount[bean.CiBuildType(buildTypeCount.Type)] = buildTypeCount.Count + successCount[CiPipeline.CiBuildType(buildTypeCount.Type)] = buildTypeCount.Count } else { - failureCount[bean.CiBuildType(buildTypeCount.Type)] = buildTypeCount.Count + failureCount[CiPipeline.CiBuildType(buildTypeCount.Type)] = buildTypeCount.Count } } return successCount, failureCount diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index d2864de0b4..d090772fac 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. -//go:generate go run -mod=mod github.com/google/wire/cmd/wire +//go:generate go run github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 23f9229660..b24c79934a 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -75,6 +75,7 @@ type CdWorkflowRepository interface { FetchArtifactsByCdPipelineId(pipelineId int, runnerType bean.WorkflowType, offset, limit int, searchString string) ([]CdWorkflowRunner, error) GetLatestTriggersOfHelmPipelinesStuckInNonTerminalStatuses(getPipelineDeployedWithinHours int) ([]*CdWorkflowRunner, error) + FindLatestRunnerByPipelineIdsAndRunnerType(ctx context.Context, pipelineIds []int, runnerType bean.WorkflowType) ([]CdWorkflowRunner, error) } type CdWorkflowRepositoryImpl struct { @@ -739,3 +740,26 @@ func (impl *CdWorkflowRepositoryImpl) CheckWorkflowRunnerByReferenceId(reference } return exists, err } + +func (impl *CdWorkflowRepositoryImpl) FindLatestRunnerByPipelineIdsAndRunnerType(ctx context.Context, pipelineIds []int, runnerType bean.WorkflowType) ([]CdWorkflowRunner, error) { + _, span := otel.Tracer("orchestrator").Start(ctx, "FindLatestRunnerByPipelineIdsAndRunnerType") + defer span.End() + if pipelineIds == nil || len(pipelineIds) == 0 { + return nil, pg.ErrNoRows + } + var latestWfrs []CdWorkflowRunner + err := impl.dbConnection. + Model(&latestWfrs). + Column("cd_workflow_runner.*", "CdWorkflow", "CdWorkflow.Pipeline"). + ColumnExpr("MAX(cd_workflow_runner.id)"). + Where("cd_workflow.pipeline_id IN (?)", pg.In(pipelineIds)). + Where("cd_workflow_runner.workflow_type = ?", runnerType). + Where("cd_workflow__pipeline.deleted = ?", false). + Group("cd_workflow_runner.id", "cd_workflow.id", "cd_workflow__pipeline.id"). 
+ Select() + if err != nil { + impl.logger.Errorw("error in getting cdWfr by appId, envId and runner type", "pipelineIds", pipelineIds, "runnerType", runnerType) + return nil, err + } + return latestWfrs, err +} diff --git a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go index 3fd5eb48e3..495b4b7d64 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go @@ -18,11 +18,17 @@ package pipelineConfig import ( + "context" + "fmt" "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean" "github.com/devtron-labs/devtron/pkg/cluster/repository" + ciPipelineBean "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/sql" + "github.com/devtron-labs/devtron/util/response/pagination" "github.com/go-pg/pg" "github.com/go-pg/pg/orm" + "go.opentelemetry.io/otel" "go.uber.org/zap" "strconv" "time" @@ -119,7 +125,7 @@ type CiPipelineRepository interface { FindByParentCiPipelineId(parentCiPipelineId int) ([]*CiPipeline, error) FindByParentIdAndType(parentCiPipelineId int, pipelineType string) ([]*CiPipeline, error) - FetchParentCiPipelinesForDG() ([]*CiPipelinesMap, error) + FetchParentCiPipelinesForDG() ([]*bean.CiPipelinesMap, error) FetchCiPipelinesForDG(parentId int, childCiPipelineIds []int) (*CiPipeline, int, error) FinDByParentCiPipelineAndAppId(parentCiPipeline int, appIds []int) ([]*CiPipeline, error) FindAllPipelineInLast24Hour() (pipelines []*CiPipeline, err error) @@ -132,7 +138,11 @@ type CiPipelineRepository interface { GetCiPipelineByArtifactId(artifactId int) (*CiPipeline, error) GetExternalCiPipelineByArtifactId(artifactId int) (*ExternalCiPipeline, error) FindLinkedCiCount(ciPipelineId int) (int, error) + GetLinkedCiPipelines(ctx context.Context, ciPipelineId int) ([]*CiPipeline, error) + GetDownStreamInfo(ctx context.Context, sourceCiPipelineId int, + appNameMatch, envNameMatch string, req *pagination.RepositoryRequest) ([]bean.LinkedCIDetails, int, error) } + type CiPipelineRepositoryImpl struct { dbConnection *pg.DB logger *zap.SugaredLogger @@ -147,7 +157,7 @@ func NewCiPipelineRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) } } -func (impl CiPipelineRepositoryImpl) FindByParentCiPipelineId(parentCiPipelineId int) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindByParentCiPipelineId(parentCiPipelineId int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). Where("parent_ci_pipeline = ?", parentCiPipelineId). @@ -156,7 +166,7 @@ func (impl CiPipelineRepositoryImpl) FindByParentCiPipelineId(parentCiPipelineId return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) FindByParentIdAndType(parentCiPipelineId int, pipelineType string) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindByParentIdAndType(parentCiPipelineId int, pipelineType string) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). Where("parent_ci_pipeline = ?", parentCiPipelineId). 
@@ -166,7 +176,7 @@ func (impl CiPipelineRepositoryImpl) FindByParentIdAndType(parentCiPipelineId in return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) FindByIdsIn(ids []int) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindByIdsIn(ids []int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). Where("id in (?)", pg.In(ids)). @@ -174,39 +184,41 @@ func (impl CiPipelineRepositoryImpl) FindByIdsIn(ids []int) ([]*CiPipeline, erro return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) SaveExternalCi(pipeline *ExternalCiPipeline, tx *pg.Tx) (*ExternalCiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) SaveExternalCi(pipeline *ExternalCiPipeline, tx *pg.Tx) (*ExternalCiPipeline, error) { err := tx.Insert(pipeline) return pipeline, err } -func (impl CiPipelineRepositoryImpl) UpdateExternalCi(pipeline *ExternalCiPipeline, tx *pg.Tx) (*ExternalCiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) UpdateExternalCi(pipeline *ExternalCiPipeline, tx *pg.Tx) (*ExternalCiPipeline, error) { err := tx.Update(pipeline) return pipeline, err } -func (impl CiPipelineRepositoryImpl) Save(pipeline *CiPipeline, tx *pg.Tx) error { +func (impl *CiPipelineRepositoryImpl) Save(pipeline *CiPipeline, tx *pg.Tx) error { return tx.Insert(pipeline) } -func (impl CiPipelineRepositoryImpl) SaveCiEnvMapping(cienvmapping *CiEnvMapping, tx *pg.Tx) error { + +func (impl *CiPipelineRepositoryImpl) SaveCiEnvMapping(cienvmapping *CiEnvMapping, tx *pg.Tx) error { return tx.Insert(cienvmapping) } -func (impl CiPipelineRepositoryImpl) UpdateCiEnvMapping(cienvmapping *CiEnvMapping, tx *pg.Tx) error { + +func (impl *CiPipelineRepositoryImpl) UpdateCiEnvMapping(cienvmapping *CiEnvMapping, tx *pg.Tx) error { return tx.Update(cienvmapping) } -func (impl CiPipelineRepositoryImpl) Update(pipeline *CiPipeline, tx *pg.Tx) error { +func (impl *CiPipelineRepositoryImpl) Update(pipeline *CiPipeline, tx *pg.Tx) error { r, err := tx.Model(pipeline).WherePK().UpdateNotNull() impl.logger.Debugf("total rows saved %d", r.RowsAffected()) return err } -func (impl CiPipelineRepositoryImpl) UpdateCiPipelineScript(script *CiPipelineScript, tx *pg.Tx) error { +func (impl *CiPipelineRepositoryImpl) UpdateCiPipelineScript(script *CiPipelineScript, tx *pg.Tx) error { r, err := tx.Model(script).WherePK().UpdateNotNull() impl.logger.Debugf("total rows saved %d", r.RowsAffected()) return err } -func (impl CiPipelineRepositoryImpl) MarkCiPipelineScriptsInactiveByCiPipelineId(ciPipelineId int, tx *pg.Tx) error { +func (impl *CiPipelineRepositoryImpl) MarkCiPipelineScriptsInactiveByCiPipelineId(ciPipelineId int, tx *pg.Tx) error { var script CiPipelineScript _, err := tx.Model(&script).Set("active = ?", false). Where("ci_pipeline_id = ?", ciPipelineId).Update() @@ -218,7 +230,7 @@ func (impl CiPipelineRepositoryImpl) MarkCiPipelineScriptsInactiveByCiPipelineId return nil } -func (impl CiPipelineRepositoryImpl) FindByAppId(appId int) (pipelines []*CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindByAppId(appId int) (pipelines []*CiPipeline, err error) { err = impl.dbConnection.Model(&pipelines). Column("ci_pipeline.*", "CiPipelineMaterials", "CiPipelineMaterials.GitMaterial"). Where("ci_pipeline.app_id =?", appId). 
@@ -227,7 +239,7 @@ func (impl CiPipelineRepositoryImpl) FindByAppId(appId int) (pipelines []*CiPipe return pipelines, err } -func (impl CiPipelineRepositoryImpl) FindByAppIds(appIds []int) (pipelines []*CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindByAppIds(appIds []int) (pipelines []*CiPipeline, err error) { err = impl.dbConnection.Model(&pipelines). Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiPipelineMaterials.GitMaterial"). Where("ci_pipeline.app_id in (?)", pg.In(appIds)). @@ -236,7 +248,7 @@ func (impl CiPipelineRepositoryImpl) FindByAppIds(appIds []int) (pipelines []*Ci return pipelines, err } -func (impl CiPipelineRepositoryImpl) FindExternalCiByCiPipelineId(ciPipelineId int) (*ExternalCiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindExternalCiByCiPipelineId(ciPipelineId int) (*ExternalCiPipeline, error) { externalCiPipeline := &ExternalCiPipeline{} err := impl.dbConnection.Model(externalCiPipeline). Column("external_ci_pipeline.*", "CiPipeline"). @@ -246,7 +258,7 @@ func (impl CiPipelineRepositoryImpl) FindExternalCiByCiPipelineId(ciPipelineId i return externalCiPipeline, err } -func (impl CiPipelineRepositoryImpl) FindExternalCiById(id int) (*ExternalCiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindExternalCiById(id int) (*ExternalCiPipeline, error) { externalCiPipeline := &ExternalCiPipeline{} err := impl.dbConnection.Model(externalCiPipeline). Column("external_ci_pipeline.*"). @@ -256,7 +268,7 @@ func (impl CiPipelineRepositoryImpl) FindExternalCiById(id int) (*ExternalCiPipe return externalCiPipeline, err } -func (impl CiPipelineRepositoryImpl) FindExternalCiByAppId(appId int) ([]*ExternalCiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindExternalCiByAppId(appId int) ([]*ExternalCiPipeline, error) { var externalCiPipeline []*ExternalCiPipeline err := impl.dbConnection.Model(&externalCiPipeline). Column("external_ci_pipeline.*"). @@ -266,7 +278,7 @@ func (impl CiPipelineRepositoryImpl) FindExternalCiByAppId(appId int) ([]*Extern return externalCiPipeline, err } -func (impl CiPipelineRepositoryImpl) FindExternalCiByAppIds(appIds []int) ([]*ExternalCiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindExternalCiByAppIds(appIds []int) ([]*ExternalCiPipeline, error) { var externalCiPipeline []*ExternalCiPipeline err := impl.dbConnection.Model(&externalCiPipeline). Column("external_ci_pipeline.*"). @@ -276,7 +288,7 @@ func (impl CiPipelineRepositoryImpl) FindExternalCiByAppIds(appIds []int) ([]*Ex return externalCiPipeline, err } -func (impl CiPipelineRepositoryImpl) FindCiScriptsByCiPipelineId(ciPipelineId int) ([]*CiPipelineScript, error) { +func (impl *CiPipelineRepositoryImpl) FindCiScriptsByCiPipelineId(ciPipelineId int) ([]*CiPipelineScript, error) { var ciPipelineScripts []*CiPipelineScript err := impl.dbConnection.Model(&ciPipelineScripts). Where("ci_pipeline_id = ?", ciPipelineId). @@ -286,7 +298,7 @@ func (impl CiPipelineRepositoryImpl) FindCiScriptsByCiPipelineId(ciPipelineId in return ciPipelineScripts, err } -func (impl CiPipelineRepositoryImpl) FindCiScriptsByCiPipelineIds(ciPipelineIds []int) ([]*CiPipelineScript, error) { +func (impl *CiPipelineRepositoryImpl) FindCiScriptsByCiPipelineIds(ciPipelineIds []int) ([]*CiPipelineScript, error) { var ciPipelineScripts []*CiPipelineScript err := impl.dbConnection.Model(&ciPipelineScripts). Where("ci_pipeline_id in (?)", ciPipelineIds). 
@@ -296,12 +308,12 @@ func (impl CiPipelineRepositoryImpl) FindCiScriptsByCiPipelineIds(ciPipelineIds return ciPipelineScripts, err } -func (impl CiPipelineRepositoryImpl) SaveCiPipelineScript(ciPipelineScript *CiPipelineScript, tx *pg.Tx) error { +func (impl *CiPipelineRepositoryImpl) SaveCiPipelineScript(ciPipelineScript *CiPipelineScript, tx *pg.Tx) error { ciPipelineScript.Active = true return tx.Insert(ciPipelineScript) } -func (impl CiPipelineRepositoryImpl) FindByIdIncludingInActive(id int) (pipeline *CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindByIdIncludingInActive(id int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{Id: id} err = impl.dbConnection.Model(pipeline). Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial"). @@ -314,7 +326,7 @@ func (impl CiPipelineRepositoryImpl) FindByIdIncludingInActive(id int) (pipeline return pipeline, err } -func (impl CiPipelineRepositoryImpl) FindById(id int) (pipeline *CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindById(id int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{Id: id} err = impl.dbConnection.Model(pipeline). Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial"). @@ -329,7 +341,7 @@ func (impl CiPipelineRepositoryImpl) FindById(id int) (pipeline *CiPipeline, err } // FindOneWithAppData is to be used for fetching minimum data (including app.App) for CiPipeline for the given CiPipeline.Id -func (impl CiPipelineRepositoryImpl) FindOneWithAppData(id int) (pipeline *CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindOneWithAppData(id int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{} err = impl.dbConnection.Model(pipeline). Column("ci_pipeline.*", "App"). @@ -339,7 +351,8 @@ func (impl CiPipelineRepositoryImpl) FindOneWithAppData(id int) (pipeline *CiPip return pipeline, err } -func (impl CiPipelineRepositoryImpl) FindCiEnvMappingByCiPipelineId(ciPipelineId int) (*CiEnvMapping, error) { + +func (impl *CiPipelineRepositoryImpl) FindCiEnvMappingByCiPipelineId(ciPipelineId int) (*CiEnvMapping, error) { ciEnvMapping := &CiEnvMapping{} err := impl.dbConnection.Model(ciEnvMapping). Where("ci_pipeline_id= ?", ciPipelineId). @@ -349,7 +362,7 @@ func (impl CiPipelineRepositoryImpl) FindCiEnvMappingByCiPipelineId(ciPipelineId return ciEnvMapping, err } -func (impl CiPipelineRepositoryImpl) FindWithMinDataByCiPipelineId(id int) (pipeline *CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindWithMinDataByCiPipelineId(id int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{Id: id} err = impl.dbConnection.Model(pipeline). Column("ci_pipeline.*", "CiTemplate"). @@ -360,7 +373,7 @@ func (impl CiPipelineRepositoryImpl) FindWithMinDataByCiPipelineId(id int) (pipe return pipeline, err } -func (impl CiPipelineRepositoryImpl) FindParentCiPipelineMapByAppId(appId int) ([]*CiPipeline, []int, error) { +func (impl *CiPipelineRepositoryImpl) FindParentCiPipelineMapByAppId(appId int) ([]*CiPipeline, []int, error) { var parentCiPipelines []*CiPipeline var linkedCiPipelineIds []int queryLinked := `select * from ci_pipeline where id in (select parent_ci_pipeline from ci_pipeline where app_id=? and deleted=? 
and parent_ci_pipeline is not null) order by id asc;` @@ -379,7 +392,7 @@ func (impl CiPipelineRepositoryImpl) FindParentCiPipelineMapByAppId(appId int) ( return parentCiPipelines, linkedCiPipelineIds, nil } -func (impl CiPipelineRepositoryImpl) PipelineExistsByName(names []string) (found []string, err error) { +func (impl *CiPipelineRepositoryImpl) PipelineExistsByName(names []string) (found []string, err error) { var name []string err = impl.dbConnection.Model((*CiPipeline)(nil)). Where("name in (?)", pg.In(names)). @@ -390,7 +403,7 @@ func (impl CiPipelineRepositoryImpl) PipelineExistsByName(names []string) (found } -func (impl CiPipelineRepositoryImpl) FindByCiAndAppDetailsById(pipelineId int) (pipeline *CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindByCiAndAppDetailsById(pipelineId int) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{} err = impl.dbConnection.Model(pipeline). Column("ci_pipeline.*", "App"). @@ -401,7 +414,7 @@ func (impl CiPipelineRepositoryImpl) FindByCiAndAppDetailsById(pipelineId int) ( return pipeline, err } -func (impl CiPipelineRepositoryImpl) FindByName(pipelineName string) (pipeline *CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindByName(pipelineName string) (pipeline *CiPipeline, err error) { pipeline = &CiPipeline{} err = impl.dbConnection.Model(pipeline). Column("ci_pipeline.*", "App"). @@ -414,7 +427,7 @@ func (impl CiPipelineRepositoryImpl) FindByName(pipelineName string) (pipeline * return pipeline, err } -func (impl CiPipelineRepositoryImpl) CheckIfPipelineExistsByNameAndAppId(pipelineName string, appId int) (bool, error) { +func (impl *CiPipelineRepositoryImpl) CheckIfPipelineExistsByNameAndAppId(pipelineName string, appId int) (bool, error) { pipeline := &CiPipeline{} found, err := impl.dbConnection.Model(pipeline). Column("ci_pipeline.*"). 
@@ -426,8 +439,8 @@ func (impl CiPipelineRepositoryImpl) CheckIfPipelineExistsByNameAndAppId(pipelin return found, err } -func (impl CiPipelineRepositoryImpl) FetchParentCiPipelinesForDG() ([]*CiPipelinesMap, error) { - var ciPipelinesMap []*CiPipelinesMap +func (impl *CiPipelineRepositoryImpl) FetchParentCiPipelinesForDG() ([]*bean.CiPipelinesMap, error) { + var ciPipelinesMap []*bean.CiPipelinesMap query := "SELECT cip.id, cip.parent_ci_pipeline" + " FROM ci_pipeline cip" + " WHERE cip.external = TRUE and cip.parent_ci_pipeline > 0 and cip.parent_ci_pipeline IS NOT NULL and cip.deleted = FALSE" @@ -439,16 +452,7 @@ func (impl CiPipelineRepositoryImpl) FetchParentCiPipelinesForDG() ([]*CiPipelin return ciPipelinesMap, err } -type CiPipelinesMap struct { - Id int `json:"id"` - ParentCiPipeline int `json:"parentCiPipeline"` -} -type ConnectedPipelinesMap struct { - Id int `json:"id"` - Count int `json:"count"` -} - -func (impl CiPipelineRepositoryImpl) FetchCiPipelinesForDG(parentId int, childCiPipelineIds []int) (*CiPipeline, int, error) { +func (impl *CiPipelineRepositoryImpl) FetchCiPipelinesForDG(parentId int, childCiPipelineIds []int) (*CiPipeline, int, error) { pipeline := &CiPipeline{} count := 0 if len(childCiPipelineIds) > 0 { @@ -469,6 +473,7 @@ func (impl CiPipelineRepositoryImpl) FetchCiPipelinesForDG(parentId int, childCi return pipeline, count, err } +// TODO remove this util and use pg.In() func sqlIntSeq(ns []int) string { if len(ns) == 0 { return "" @@ -497,7 +502,7 @@ func (impl *CiPipelineRepositoryImpl) FinDByParentCiPipelineAndAppId(parentCiPip return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) FindAllPipelineInLast24Hour() (pipelines []*CiPipeline, err error) { +func (impl *CiPipelineRepositoryImpl) FindAllPipelineInLast24Hour() (pipelines []*CiPipeline, err error) { err = impl.dbConnection.Model(&pipelines). Column("ci_pipeline.*"). Where("created_on > ?", time.Now().AddDate(0, 0, -1)). @@ -505,7 +510,7 @@ func (impl CiPipelineRepositoryImpl) FindAllPipelineInLast24Hour() (pipelines [] return pipelines, err } -func (impl CiPipelineRepositoryImpl) FindNumberOfAppsWithCiPipeline(appIds []int) (count int, err error) { +func (impl *CiPipelineRepositoryImpl) FindNumberOfAppsWithCiPipeline(appIds []int) (count int, err error) { var ciPipelines []*CiPipeline count, err = impl.dbConnection. Model(&ciPipelines). @@ -520,7 +525,7 @@ func (impl CiPipelineRepositoryImpl) FindNumberOfAppsWithCiPipeline(appIds []int return count, nil } -func (impl CiPipelineRepositoryImpl) FindAppAndProjectByCiPipelineIds(ciPipelineIds []int) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindAppAndProjectByCiPipelineIds(ciPipelineIds []int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines).Column("ci_pipeline.*", "App", "App.Team"). Where("ci_pipeline.id in(?)", pg.In(ciPipelineIds)). @@ -529,7 +534,7 @@ func (impl CiPipelineRepositoryImpl) FindAppAndProjectByCiPipelineIds(ciPipeline return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) FindCiPipelineConfigsByIds(ids []int) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindCiPipelineConfigsByIds(ids []int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). Column("ci_pipeline.*", "App", "CiPipelineMaterials", "CiTemplate", "CiTemplate.DockerRegistry", "CiPipelineMaterials.GitMaterial"). 
@@ -539,7 +544,7 @@ func (impl CiPipelineRepositoryImpl) FindCiPipelineConfigsByIds(ids []int) ([]*C return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) FindByParentCiPipelineIds(parentCiPipelineIds []int) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindByParentCiPipelineIds(parentCiPipelineIds []int) ([]*CiPipeline, error) { var ciPipelines []*CiPipeline err := impl.dbConnection.Model(&ciPipelines). Where("parent_ci_pipeline in (?)", pg.In(parentCiPipelineIds)). @@ -548,7 +553,7 @@ func (impl CiPipelineRepositoryImpl) FindByParentCiPipelineIds(parentCiPipelineI return ciPipelines, err } -func (impl CiPipelineRepositoryImpl) FindAppIdsForCiPipelineIds(pipelineIds []int) (map[int]int, error) { +func (impl *CiPipelineRepositoryImpl) FindAppIdsForCiPipelineIds(pipelineIds []int) (map[int]int, error) { ciPipelineIdVsAppId := make(map[int]int, 0) if len(pipelineIds) == 0 { return ciPipelineIdVsAppId, nil @@ -569,7 +574,7 @@ func (impl CiPipelineRepositoryImpl) FindAppIdsForCiPipelineIds(pipelineIds []in return ciPipelineIdVsAppId, nil } -func (impl CiPipelineRepositoryImpl) GetCiPipelineByArtifactId(artifactId int) (*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) GetCiPipelineByArtifactId(artifactId int) (*CiPipeline, error) { ciPipeline := &CiPipeline{} err := impl.dbConnection.Model(ciPipeline). Column("ci_pipeline.*"). @@ -579,7 +584,8 @@ func (impl CiPipelineRepositoryImpl) GetCiPipelineByArtifactId(artifactId int) ( Select() return ciPipeline, err } -func (impl CiPipelineRepositoryImpl) GetExternalCiPipelineByArtifactId(artifactId int) (*ExternalCiPipeline, error) { + +func (impl *CiPipelineRepositoryImpl) GetExternalCiPipelineByArtifactId(artifactId int) (*ExternalCiPipeline, error) { ciPipeline := &ExternalCiPipeline{} query := "SELECT ecp.* " + " FROM external_ci_pipeline ecp " + @@ -589,7 +595,7 @@ func (impl CiPipelineRepositoryImpl) GetExternalCiPipelineByArtifactId(artifactI return ciPipeline, err } -func (impl CiPipelineRepositoryImpl) FindCiPipelineByAppIdAndEnvIds(appId int, envIds []int) ([]*CiPipeline, error) { +func (impl *CiPipelineRepositoryImpl) FindCiPipelineByAppIdAndEnvIds(appId int, envIds []int) ([]*CiPipeline, error) { var pipelines []*CiPipeline query := `SELECT DISTINCT ci_pipeline.* FROM ci_pipeline INNER JOIN pipeline ON pipeline.ci_pipeline_id = ci_pipeline.id WHERE ci_pipeline.app_id = ? AND pipeline.environment_id IN (?) AND ci_pipeline.deleted = false AND pipeline.deleted = false;` @@ -597,10 +603,11 @@ func (impl CiPipelineRepositoryImpl) FindCiPipelineByAppIdAndEnvIds(appId int, e return pipelines, err } -func (impl CiPipelineRepositoryImpl) FindLinkedCiCount(ciPipelineId int) (int, error) { +func (impl *CiPipelineRepositoryImpl) FindLinkedCiCount(ciPipelineId int) (int, error) { pipeline := &CiPipeline{} cnt, err := impl.dbConnection.Model(pipeline). Where("parent_ci_pipeline = ?", ciPipelineId). + Where("ci_pipeline_type != ?", ciPipelineBean.LINKED_CD). Where("deleted = ?", false). Count() if err == pg.ErrNoRows { @@ -608,3 +615,80 @@ func (impl CiPipelineRepositoryImpl) FindLinkedCiCount(ciPipelineId int) (int, e } return cnt, err } + +func (impl *CiPipelineRepositoryImpl) GetLinkedCiPipelines(ctx context.Context, ciPipelineId int) ([]*CiPipeline, error) { + _, span := otel.Tracer("orchestrator").Start(ctx, "GetLinkedCiPipelines") + defer span.End() + var linkedCIPipelines []*CiPipeline + err := impl.dbConnection.Model(&linkedCIPipelines). + Where("parent_ci_pipeline = ?", ciPipelineId). 
+		Where("ci_pipeline_type != ?", ciPipelineBean.LINKED_CD).
+		Where("deleted = ?", false).
+		Select()
+	if err != nil {
+		return nil, err
+	}
+	return linkedCIPipelines, nil
+}
+
+func (impl *CiPipelineRepositoryImpl) GetDownStreamInfo(ctx context.Context, sourceCiPipelineId int,
+	appNameMatch, envNameMatch string, req *pagination.RepositoryRequest) ([]bean.LinkedCIDetails, int, error) {
+	_, span := otel.Tracer("orchestrator").Start(ctx, "GetDownStreamInfo")
+	defer span.End()
+	linkedCIDetails := make([]bean.LinkedCIDetails, 0)
+	query := impl.dbConnection.Model().
+		Table("ci_pipeline").
+		// add columns that have no duplicate references across the joined tables
+		Column("ci_pipeline.app_id").
+		// add columns that have duplicate references across the joined tables and assign alias names
+		ColumnExpr("a.app_name as app_name").
+		ColumnExpr("e.environment_name as environment_name").
+		ColumnExpr("p.id as pipeline_id").
+		ColumnExpr("p.trigger_type as trigger_mode").
+		ColumnExpr("p.environment_id as environment_id").
+		// join app table
+		Join("INNER JOIN app a").
+		JoinOn("a.id = ci_pipeline.app_id").
+		JoinOn("a.active = ?", true).
+		// join pipeline table
+		Join("LEFT JOIN pipeline p").
+		JoinOn("p.ci_pipeline_id = ci_pipeline.id").
+		JoinOn("p.deleted = ?", false).
+		// join environment table
+		Join("LEFT JOIN environment e").
+		JoinOn("e.id = p.environment_id").
+		JoinOn("e.active = ?", true).
+		// constraints
+		Where("ci_pipeline.parent_ci_pipeline = ?", sourceCiPipelineId).
+		Where("ci_pipeline.ci_pipeline_type != ?", ciPipelineBean.LINKED_CD).
+		Where("ci_pipeline.deleted = ?", false)
+	// case-insensitive app name filtering
+	if len(appNameMatch) != 0 {
+		query = query.Where("LOWER(a.app_name) LIKE ?", "%"+appNameMatch+"%")
+	}
+	// env name filtering
+	if len(envNameMatch) != 0 {
+		query = query.Where("e.environment_name = ?", envNameMatch)
+	}
+	// get total response count
+	totalCount, err := query.Count()
+	if err != nil {
+		return nil, 0, err
+	}
+	// apply ordering and pagination, if requested
+	if req != nil {
+		if len(req.SortBy) != 0 && len(req.Order) != 0 {
+			query = query.Order(fmt.Sprintf("%s %s", req.SortBy, string(req.Order)))
+		}
+		if req.Limit != 0 {
+			query = query.Limit(req.Limit).
+ Offset(req.Offset) + } + } + + err = query.Select(&linkedCIDetails) + if err != nil { + return nil, 0, err + } + return linkedCIDetails, totalCount, err +} diff --git a/internal/sql/repository/pipelineConfig/PipelineRepository.go b/internal/sql/repository/pipelineConfig/PipelineRepository.go index 83ec16e042..0d8fbbb7e8 100644 --- a/internal/sql/repository/pipelineConfig/PipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/PipelineRepository.go @@ -18,6 +18,7 @@ package pipelineConfig import ( + "context" "github.com/devtron-labs/common-lib/utils/k8s/health" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/models" @@ -28,6 +29,7 @@ import ( "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" + "go.opentelemetry.io/otel" "go.uber.org/zap" "k8s.io/utils/pointer" "time" @@ -116,6 +118,8 @@ type PipelineRepository interface { FilterDeploymentDeleteRequestedPipelineIds(cdPipelineIds []int) (map[int]bool, error) FindDeploymentTypeByPipelineIds(cdPipelineIds []int) (map[int]DeploymentObject, error) UpdateOldCiPipelineIdToNewCiPipelineId(tx *pg.Tx, oldCiPipelineId, newCiPipelineId int) error + // FindWithEnvironmentByCiIds Possibility of duplicate environment names when filtered by unique pipeline ids + FindWithEnvironmentByCiIds(ctx context.Context, cIPipelineIds []int) ([]*Pipeline, error) } type CiArtifactDTO struct { @@ -730,3 +734,16 @@ func (impl PipelineRepositoryImpl) UpdateOldCiPipelineIdToNewCiPipelineId(tx *pg Where("deleted = ?", false).Update() return err } +func (impl PipelineRepositoryImpl) FindWithEnvironmentByCiIds(ctx context.Context, cIPipelineIds []int) ([]*Pipeline, error) { + _, span := otel.Tracer("orchestrator").Start(ctx, "FindWithEnvironmentByCiIds") + defer span.End() + var cDPipelines []*Pipeline + err := impl.dbConnection.Model(&cDPipelines). + Column("pipeline.*", "Environment"). + Where("ci_pipeline_id in (?)", pg.In(cIPipelineIds)). + Select() + if err != nil { + return nil, err + } + return cDPipelines, nil +} diff --git a/internal/sql/repository/pipelineConfig/bean/CiPipelineBean.go b/internal/sql/repository/pipelineConfig/bean/CiPipelineBean.go new file mode 100644 index 0000000000..5e7723abfd --- /dev/null +++ b/internal/sql/repository/pipelineConfig/bean/CiPipelineBean.go @@ -0,0 +1,15 @@ +package bean + +type LinkedCIDetails struct { + AppName string `sql:"app_name"` + EnvironmentName string `sql:"environment_name"` + TriggerMode string `sql:"trigger_mode"` + PipelineId int `sql:"pipeline_id"` + AppId int `sql:"app_id"` + EnvironmentId int `sql:"environment_id"` +} + +type CiPipelinesMap struct { + Id int `json:"id"` + ParentCiPipeline int `json:"parentCiPipeline"` +} diff --git a/internal/sql/repository/pipelineConfig/mocks/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/mocks/CiPipelineRepository.go index d062a059fc..b2fdcf4189 100644 --- a/internal/sql/repository/pipelineConfig/mocks/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/mocks/CiPipelineRepository.go @@ -1,11 +1,18 @@ -// Code generated by mockery v2.20.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package mocks import ( - pg "github.com/go-pg/pg" + context "context" + + bean "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean" + mock "github.com/stretchr/testify/mock" + pagination "github.com/devtron-labs/devtron/util/response/pagination" + + pg "github.com/go-pg/pg" + pipelineConfig "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" ) @@ -14,6 +21,44 @@ type CiPipelineRepository struct { mock.Mock } +// CheckIfPipelineExistsByNameAndAppId provides a mock function with given fields: pipelineName, appId +func (_m *CiPipelineRepository) CheckIfPipelineExistsByNameAndAppId(pipelineName string, appId int) (bool, error) { + ret := _m.Called(pipelineName, appId) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(string, int) (bool, error)); ok { + return rf(pipelineName, appId) + } + if rf, ok := ret.Get(0).(func(string, int) bool); ok { + r0 = rf(pipelineName, appId) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(string, int) error); ok { + r1 = rf(pipelineName, appId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitTx provides a mock function with given fields: tx +func (_m *CiPipelineRepository) CommitTx(tx *pg.Tx) error { + ret := _m.Called(tx) + + var r0 error + if rf, ok := ret.Get(0).(func(*pg.Tx) error); ok { + r0 = rf(tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // FetchCiPipelinesForDG provides a mock function with given fields: parentId, childCiPipelineIds func (_m *CiPipelineRepository) FetchCiPipelinesForDG(parentId int, childCiPipelineIds []int) (*pipelineConfig.CiPipeline, int, error) { ret := _m.Called(parentId, childCiPipelineIds) @@ -48,19 +93,19 @@ func (_m *CiPipelineRepository) FetchCiPipelinesForDG(parentId int, childCiPipel } // FetchParentCiPipelinesForDG provides a mock function with given fields: -func (_m *CiPipelineRepository) FetchParentCiPipelinesForDG() ([]*pipelineConfig.CiPipelinesMap, error) { +func (_m *CiPipelineRepository) FetchParentCiPipelinesForDG() ([]*bean.CiPipelinesMap, error) { ret := _m.Called() - var r0 []*pipelineConfig.CiPipelinesMap + var r0 []*bean.CiPipelinesMap var r1 error - if rf, ok := ret.Get(0).(func() ([]*pipelineConfig.CiPipelinesMap, error)); ok { + if rf, ok := ret.Get(0).(func() ([]*bean.CiPipelinesMap, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() []*pipelineConfig.CiPipelinesMap); ok { + if rf, ok := ret.Get(0).(func() []*bean.CiPipelinesMap); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*pipelineConfig.CiPipelinesMap) + r0 = ret.Get(0).([]*bean.CiPipelinesMap) } } @@ -281,6 +326,32 @@ func (_m *CiPipelineRepository) FindById(id int) (*pipelineConfig.CiPipeline, er return r0, r1 } +// FindByIdIncludingInActive provides a mock function with given fields: id +func (_m *CiPipelineRepository) FindByIdIncludingInActive(id int) (*pipelineConfig.CiPipeline, error) { + ret := _m.Called(id) + + var r0 *pipelineConfig.CiPipeline + var r1 error + if rf, ok := ret.Get(0).(func(int) (*pipelineConfig.CiPipeline, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int) *pipelineConfig.CiPipeline); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pipelineConfig.CiPipeline) + } + } + + if rf, ok := ret.Get(1).(func(int) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // FindByIdsIn provides a mock function with given fields: ids func (_m *CiPipelineRepository) FindByIdsIn(ids []int) 
([]*pipelineConfig.CiPipeline, error) { ret := _m.Called(ids) @@ -385,6 +456,32 @@ func (_m *CiPipelineRepository) FindByParentCiPipelineIds(parentCiPipelineIds [] return r0, r1 } +// FindByParentIdAndType provides a mock function with given fields: parentCiPipelineId, pipelineType +func (_m *CiPipelineRepository) FindByParentIdAndType(parentCiPipelineId int, pipelineType string) ([]*pipelineConfig.CiPipeline, error) { + ret := _m.Called(parentCiPipelineId, pipelineType) + + var r0 []*pipelineConfig.CiPipeline + var r1 error + if rf, ok := ret.Get(0).(func(int, string) ([]*pipelineConfig.CiPipeline, error)); ok { + return rf(parentCiPipelineId, pipelineType) + } + if rf, ok := ret.Get(0).(func(int, string) []*pipelineConfig.CiPipeline); ok { + r0 = rf(parentCiPipelineId, pipelineType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*pipelineConfig.CiPipeline) + } + } + + if rf, ok := ret.Get(1).(func(int, string) error); ok { + r1 = rf(parentCiPipelineId, pipelineType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // FindCiEnvMappingByCiPipelineId provides a mock function with given fields: ciPipelineId func (_m *CiPipelineRepository) FindCiEnvMappingByCiPipelineId(ciPipelineId int) (*pipelineConfig.CiEnvMapping, error) { ret := _m.Called(ciPipelineId) @@ -411,6 +508,32 @@ func (_m *CiPipelineRepository) FindCiEnvMappingByCiPipelineId(ciPipelineId int) return r0, r1 } +// FindCiPipelineByAppIdAndEnvIds provides a mock function with given fields: appId, envIds +func (_m *CiPipelineRepository) FindCiPipelineByAppIdAndEnvIds(appId int, envIds []int) ([]*pipelineConfig.CiPipeline, error) { + ret := _m.Called(appId, envIds) + + var r0 []*pipelineConfig.CiPipeline + var r1 error + if rf, ok := ret.Get(0).(func(int, []int) ([]*pipelineConfig.CiPipeline, error)); ok { + return rf(appId, envIds) + } + if rf, ok := ret.Get(0).(func(int, []int) []*pipelineConfig.CiPipeline); ok { + r0 = rf(appId, envIds) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*pipelineConfig.CiPipeline) + } + } + + if rf, ok := ret.Get(1).(func(int, []int) error); ok { + r1 = rf(appId, envIds) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // FindCiPipelineConfigsByIds provides a mock function with given fields: ids func (_m *CiPipelineRepository) FindCiPipelineConfigsByIds(ids []int) ([]*pipelineConfig.CiPipeline, error) { ret := _m.Called(ids) @@ -593,6 +716,30 @@ func (_m *CiPipelineRepository) FindExternalCiById(id int) (*pipelineConfig.Exte return r0, r1 } +// FindLinkedCiCount provides a mock function with given fields: ciPipelineId +func (_m *CiPipelineRepository) FindLinkedCiCount(ciPipelineId int) (int, error) { + ret := _m.Called(ciPipelineId) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(int) (int, error)); ok { + return rf(ciPipelineId) + } + if rf, ok := ret.Get(0).(func(int) int); ok { + r0 = rf(ciPipelineId) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(int) error); ok { + r1 = rf(ciPipelineId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // FindNumberOfAppsWithCiPipeline provides a mock function with given fields: appIds func (_m *CiPipelineRepository) FindNumberOfAppsWithCiPipeline(appIds []int) (int, error) { ret := _m.Called(appIds) @@ -617,6 +764,32 @@ func (_m *CiPipelineRepository) FindNumberOfAppsWithCiPipeline(appIds []int) (in return r0, r1 } +// FindOneWithAppData provides a mock function with given fields: id +func (_m *CiPipelineRepository) FindOneWithAppData(id int) (*pipelineConfig.CiPipeline, error) 
{ + ret := _m.Called(id) + + var r0 *pipelineConfig.CiPipeline + var r1 error + if rf, ok := ret.Get(0).(func(int) (*pipelineConfig.CiPipeline, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(int) *pipelineConfig.CiPipeline); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pipelineConfig.CiPipeline) + } + } + + if rf, ok := ret.Get(1).(func(int) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // FindParentCiPipelineMapByAppId provides a mock function with given fields: appId func (_m *CiPipelineRepository) FindParentCiPipelineMapByAppId(appId int) ([]*pipelineConfig.CiPipeline, []int, error) { ret := _m.Called(appId) @@ -704,6 +877,39 @@ func (_m *CiPipelineRepository) GetCiPipelineByArtifactId(artifactId int) (*pipe return r0, r1 } +// GetDownStreamInfo provides a mock function with given fields: ctx, sourceCiPipelineId, limit, offset, appNameMatch, envNameMatch, order +func (_m *CiPipelineRepository) GetDownStreamInfo(ctx context.Context, sourceCiPipelineId int, limit int, offset int, appNameMatch string, envNameMatch string, order pagination.SortOrder) ([]bean.LinkedCIDetails, int, error) { + ret := _m.Called(ctx, sourceCiPipelineId, limit, offset, appNameMatch, envNameMatch, order) + + var r0 []bean.LinkedCIDetails + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, int, int, int, string, string, pagination.SortOrder) ([]bean.LinkedCIDetails, int, error)); ok { + return rf(ctx, sourceCiPipelineId, limit, offset, appNameMatch, envNameMatch, order) + } + if rf, ok := ret.Get(0).(func(context.Context, int, int, int, string, string, pagination.SortOrder) []bean.LinkedCIDetails); ok { + r0 = rf(ctx, sourceCiPipelineId, limit, offset, appNameMatch, envNameMatch, order) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]bean.LinkedCIDetails) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int, int, int, string, string, pagination.SortOrder) int); ok { + r1 = rf(ctx, sourceCiPipelineId, limit, offset, appNameMatch, envNameMatch, order) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(context.Context, int, int, int, string, string, pagination.SortOrder) error); ok { + r2 = rf(ctx, sourceCiPipelineId, limit, offset, appNameMatch, envNameMatch, order) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // GetExternalCiPipelineByArtifactId provides a mock function with given fields: artifactId func (_m *CiPipelineRepository) GetExternalCiPipelineByArtifactId(artifactId int) (*pipelineConfig.ExternalCiPipeline, error) { ret := _m.Called(artifactId) @@ -730,6 +936,32 @@ func (_m *CiPipelineRepository) GetExternalCiPipelineByArtifactId(artifactId int return r0, r1 } +// GetLinkedCiPipelines provides a mock function with given fields: ctx, ciPipelineId +func (_m *CiPipelineRepository) GetLinkedCiPipelines(ctx context.Context, ciPipelineId int) ([]*pipelineConfig.CiPipeline, error) { + ret := _m.Called(ctx, ciPipelineId) + + var r0 []*pipelineConfig.CiPipeline + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, int) ([]*pipelineConfig.CiPipeline, error)); ok { + return rf(ctx, ciPipelineId) + } + if rf, ok := ret.Get(0).(func(context.Context, int) []*pipelineConfig.CiPipeline); ok { + r0 = rf(ctx, ciPipelineId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*pipelineConfig.CiPipeline) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, int) error); ok { + r1 = rf(ctx, ciPipelineId) + } else { + r1 = ret.Error(1) + } + 
+ return r0, r1 +} + // MarkCiPipelineScriptsInactiveByCiPipelineId provides a mock function with given fields: ciPipelineId, tx func (_m *CiPipelineRepository) MarkCiPipelineScriptsInactiveByCiPipelineId(ciPipelineId int, tx *pg.Tx) error { ret := _m.Called(ciPipelineId, tx) @@ -770,6 +1002,20 @@ func (_m *CiPipelineRepository) PipelineExistsByName(names []string) ([]string, return r0, r1 } +// RollbackTx provides a mock function with given fields: tx +func (_m *CiPipelineRepository) RollbackTx(tx *pg.Tx) error { + ret := _m.Called(tx) + + var r0 error + if rf, ok := ret.Get(0).(func(*pg.Tx) error); ok { + r0 = rf(tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Save provides a mock function with given fields: pipeline, tx func (_m *CiPipelineRepository) Save(pipeline *pipelineConfig.CiPipeline, tx *pg.Tx) error { ret := _m.Called(pipeline, tx) @@ -838,6 +1084,32 @@ func (_m *CiPipelineRepository) SaveExternalCi(pipeline *pipelineConfig.External return r0, r1 } +// StartTx provides a mock function with given fields: +func (_m *CiPipelineRepository) StartTx() (*pg.Tx, error) { + ret := _m.Called() + + var r0 *pg.Tx + var r1 error + if rf, ok := ret.Get(0).(func() (*pg.Tx, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *pg.Tx); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pg.Tx) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Update provides a mock function with given fields: pipeline, tx func (_m *CiPipelineRepository) Update(pipeline *pipelineConfig.CiPipeline, tx *pg.Tx) error { ret := _m.Called(pipeline, tx) @@ -906,13 +1178,12 @@ func (_m *CiPipelineRepository) UpdateExternalCi(pipeline *pipelineConfig.Extern return r0, r1 } -type mockConstructorTestingTNewCiPipelineRepository interface { +// NewCiPipelineRepository creates a new instance of CiPipelineRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCiPipelineRepository(t interface { mock.TestingT Cleanup(func()) -} - -// NewCiPipelineRepository creates a new instance of CiPipelineRepository. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCiPipelineRepository(t mockConstructorTestingTNewCiPipelineRepository) *CiPipelineRepository { +}) *CiPipelineRepository { mock := &CiPipelineRepository{} mock.Mock.Test(t) diff --git a/pkg/appClone/batch/Mocks_test.go b/pkg/appClone/batch/Mocks_test.go index a6c52d5cc8..e025719603 100644 --- a/pkg/appClone/batch/Mocks_test.go +++ b/pkg/appClone/batch/Mocks_test.go @@ -24,9 +24,11 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + bean3 "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/cluster" "github.com/devtron-labs/devtron/pkg/pipeline" + pipelineBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" "go.uber.org/zap" ) @@ -77,37 +79,37 @@ func (repo AppRepositoryMock) FindAppsByEnvironmentId(environmentId int) ([]app. 
// -------------- type ConfigMapServiceMock struct{} -func (impl ConfigMapServiceMock) CMGlobalAddUpdate(configMapRequest *pipeline.ConfigDataRequest) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CMGlobalAddUpdate(configMapRequest *pipelineBean.ConfigDataRequest) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CMGlobalFetch(appId int) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CMGlobalFetch(appId int) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CMEnvironmentAddUpdate(configMapRequest *pipeline.ConfigDataRequest) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CMEnvironmentAddUpdate(configMapRequest *pipelineBean.ConfigDataRequest) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CMEnvironmentFetch(appId int, envId int) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CMEnvironmentFetch(appId int, envId int) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } // --------------------------------------------------------------------------------------------- -func (impl ConfigMapServiceMock) CSGlobalAddUpdate(configMapRequest *pipeline.ConfigDataRequest) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CSGlobalAddUpdate(configMapRequest *pipelineBean.ConfigDataRequest) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CSGlobalFetch(appId int) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CSGlobalFetch(appId int) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CSEnvironmentAddUpdate(configMapRequest *pipeline.ConfigDataRequest) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CSEnvironmentAddUpdate(configMapRequest *pipelineBean.ConfigDataRequest) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CSEnvironmentFetch(appId int, envId int) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CSEnvironmentFetch(appId int, envId int) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } @@ -145,11 +147,11 @@ func (impl ConfigMapServiceMock) CSEnvironmentDeleteByAppIdAndEnvId(name string, //// -func (impl ConfigMapServiceMock) CSGlobalFetchForEdit(name string, id int, userId int32) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CSGlobalFetchForEdit(name string, id int, userId int32) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } -func (impl ConfigMapServiceMock) CSEnvironmentFetchForEdit(name string, id int, appId int, envId int, userId int32) (*pipeline.ConfigDataRequest, error) { +func (impl ConfigMapServiceMock) CSEnvironmentFetchForEdit(name string, id int, appId int, envId int, userId int32) (*pipelineBean.ConfigDataRequest, error) { panic("implement me") } @@ -373,7 +375,7 @@ func (impl CiPipelineRepositoryMock) FindByParentCiPipelineId(parentCiPipelineId panic("implement me") } -func (impl CiPipelineRepositoryMock) FetchParentCiPipelinesForDG() ([]*pipelineConfig.CiPipelinesMap, error) { +func (impl CiPipelineRepositoryMock) FetchParentCiPipelinesForDG() ([]*bean3.CiPipelinesMap, error) { panic("implement me") } func (impl CiPipelineRepositoryMock) FetchCiPipelinesForDG(parentId int, childCiPipelineIds []int) 
(*pipelineConfig.CiPipeline, int, error) { diff --git a/pkg/bean/app.go b/pkg/bean/app.go index 429239bdd4..c3f9f5d618 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -28,6 +28,7 @@ import ( "github.com/devtron-labs/devtron/pkg/chartRepo/repository" bean3 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + CiPipeline2 "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/repository" "strings" "time" @@ -104,57 +105,57 @@ type CiMaterial struct { } type CiPipeline struct { - IsManual bool `json:"isManual"` - DockerArgs map[string]string `json:"dockerArgs"` - IsExternal bool `json:"isExternal"` - ParentCiPipeline int `json:"parentCiPipeline"` - ParentAppId int `json:"parentAppId"` - AppId int `json:"appId"` - AppName string `json:"appName,omitempty"` - AppType helper.AppType `json:"appType,omitempty"` - ExternalCiConfig ExternalCiConfig `json:"externalCiConfig"` - CiMaterial []*CiMaterial `json:"ciMaterial,omitempty" validate:"dive,min=1"` - Name string `json:"name,omitempty" validate:"name-component,max=100"` //name suffix of corresponding pipeline. required, unique, validation corresponding to gocd pipelineName will be applicable - Id int `json:"id,omitempty" ` - Version string `json:"version,omitempty"` //matchIf token version in gocd . used for update request - Active bool `json:"active,omitempty"` //pipeline is active or not - Deleted bool `json:"deleted,omitempty"` - BeforeDockerBuild []*Task `json:"beforeDockerBuild,omitempty" validate:"dive"` - AfterDockerBuild []*Task `json:"afterDockerBuild,omitempty" validate:"dive"` - BeforeDockerBuildScripts []*CiScript `json:"beforeDockerBuildScripts,omitempty" validate:"dive"` - AfterDockerBuildScripts []*CiScript `json:"afterDockerBuildScripts,omitempty" validate:"dive"` - LinkedCount int `json:"linkedCount"` - PipelineType bean.PipelineType `json:"pipelineType,omitempty"` - ScanEnabled bool `json:"scanEnabled,notnull"` - AppWorkflowId int `json:"appWorkflowId,omitempty"` - PreBuildStage *bean.PipelineStageDto `json:"preBuildStage,omitempty"` - PostBuildStage *bean.PipelineStageDto `json:"postBuildStage,omitempty"` - TargetPlatform string `json:"targetPlatform,omitempty"` - IsDockerConfigOverridden bool `json:"isDockerConfigOverridden"` - DockerConfigOverride DockerConfigOverride `json:"dockerConfigOverride,omitempty"` - EnvironmentId int `json:"environmentId,omitempty"` - LastTriggeredEnvId int `json:"lastTriggeredEnvId"` - CustomTagObject *CustomTagData `json:"customTag,omitempty"` - DefaultTag []string `json:"defaultTag,omitempty"` - EnableCustomTag bool `json:"enableCustomTag"` + IsManual bool `json:"isManual"` + DockerArgs map[string]string `json:"dockerArgs"` + IsExternal bool `json:"isExternal"` + ParentCiPipeline int `json:"parentCiPipeline"` + ParentAppId int `json:"parentAppId"` + AppId int `json:"appId"` + AppName string `json:"appName,omitempty"` + AppType helper.AppType `json:"appType,omitempty"` + ExternalCiConfig ExternalCiConfig `json:"externalCiConfig"` + CiMaterial []*CiMaterial `json:"ciMaterial,omitempty" validate:"dive,min=1"` + Name string `json:"name,omitempty" validate:"name-component,max=100"` //name suffix of corresponding pipeline. required, unique, validation corresponding to gocd pipelineName will be applicable + Id int `json:"id,omitempty" ` + Version string `json:"version,omitempty"` //matchIf token version in gocd . 
used for update request + Active bool `json:"active,omitempty"` //pipeline is active or not + Deleted bool `json:"deleted,omitempty"` + BeforeDockerBuild []*Task `json:"beforeDockerBuild,omitempty" validate:"dive"` + AfterDockerBuild []*Task `json:"afterDockerBuild,omitempty" validate:"dive"` + BeforeDockerBuildScripts []*CiScript `json:"beforeDockerBuildScripts,omitempty" validate:"dive"` + AfterDockerBuildScripts []*CiScript `json:"afterDockerBuildScripts,omitempty" validate:"dive"` + LinkedCount int `json:"linkedCount"` + PipelineType CiPipeline2.PipelineType `json:"pipelineType,omitempty"` + ScanEnabled bool `json:"scanEnabled,notnull"` + AppWorkflowId int `json:"appWorkflowId,omitempty"` + PreBuildStage *bean.PipelineStageDto `json:"preBuildStage,omitempty"` + PostBuildStage *bean.PipelineStageDto `json:"postBuildStage,omitempty"` + TargetPlatform string `json:"targetPlatform,omitempty"` + IsDockerConfigOverridden bool `json:"isDockerConfigOverridden"` + DockerConfigOverride DockerConfigOverride `json:"dockerConfigOverride,omitempty"` + EnvironmentId int `json:"environmentId,omitempty"` + LastTriggeredEnvId int `json:"lastTriggeredEnvId"` + CustomTagObject *CustomTagData `json:"customTag,omitempty"` + DefaultTag []string `json:"defaultTag,omitempty"` + EnableCustomTag bool `json:"enableCustomTag"` } type DockerConfigOverride struct { - DockerRegistry string `json:"dockerRegistry,omitempty"` - DockerRepository string `json:"dockerRepository,omitempty"` - CiBuildConfig *bean.CiBuildConfigBean `json:"ciBuildConfig,omitEmpty"` + DockerRegistry string `json:"dockerRegistry,omitempty"` + DockerRepository string `json:"dockerRepository,omitempty"` + CiBuildConfig *CiPipeline2.CiBuildConfigBean `json:"ciBuildConfig,omitEmpty"` //DockerBuildConfig *DockerBuildConfig `json:"dockerBuildConfig,omitempty"` } type CiPipelineMin struct { - Name string `json:"name,omitempty" validate:"name-component,max=100"` //name suffix of corresponding pipeline. required, unique, validation corresponding to gocd pipelineName will be applicable - Id int `json:"id,omitempty" ` - Version string `json:"version,omitempty"` //matchIf token version in gocd . used for update request - IsExternal bool `json:"isExternal,omitempty"` - ParentCiPipeline int `json:"parentCiPipeline"` - ParentAppId int `json:"parentAppId"` - PipelineType bean.PipelineType `json:"pipelineType,omitempty"` - ScanEnabled bool `json:"scanEnabled,notnull"` + Name string `json:"name,omitempty" validate:"name-component,max=100"` //name suffix of corresponding pipeline. required, unique, validation corresponding to gocd pipelineName will be applicable + Id int `json:"id,omitempty" ` + Version string `json:"version,omitempty"` //matchIf token version in gocd . 
used for update request + IsExternal bool `json:"isExternal,omitempty"` + ParentCiPipeline int `json:"parentCiPipeline"` + ParentAppId int `json:"parentAppId"` + PipelineType CiPipeline2.PipelineType `json:"pipelineType,omitempty"` + ScanEnabled bool `json:"scanEnabled,notnull"` } type CiScript struct { @@ -287,12 +288,12 @@ type CiPatchRequest struct { IsJob bool `json:"-"` IsCloneJob bool `json:"isCloneJob,omitempty"` - ParentCDPipeline int `json:"parentCDPipeline"` - DeployEnvId int `json:"deployEnvId"` - SwitchFromCiPipelineId int `json:"switchFromCiPipelineId"` - SwitchFromExternalCiPipelineId int `json:"switchFromExternalCiPipelineId"` - SwitchFromCiPipelineType bean.PipelineType `json:"-"` - SwitchToCiPipelineType bean.PipelineType `json:"-"` + ParentCDPipeline int `json:"parentCDPipeline"` + DeployEnvId int `json:"deployEnvId"` + SwitchFromCiPipelineId int `json:"switchFromCiPipelineId"` + SwitchFromExternalCiPipelineId int `json:"switchFromExternalCiPipelineId"` + SwitchFromCiPipelineType CiPipeline2.PipelineType `json:"-"` + SwitchToCiPipelineType CiPipeline2.PipelineType `json:"-"` } func (ciPatchRequest CiPatchRequest) IsSwitchCiPipelineRequest() bool { @@ -355,7 +356,7 @@ type CiConfigRequest struct { AppId int `json:"appId,omitempty" validate:"required,number"` DockerRegistry string `json:"dockerRegistry,omitempty" ` //repo id example ecr mapped one-one with gocd registry entry DockerRepository string `json:"dockerRepository,omitempty"` // example test-app-1 which is inside ecr - CiBuildConfig *bean.CiBuildConfigBean `json:"ciBuildConfig"` + CiBuildConfig *CiPipeline2.CiBuildConfigBean `json:"ciBuildConfig"` CiPipelines []*CiPipeline `json:"ciPipelines,omitempty" validate:"dive"` //a pipeline will be built for each ciMaterial AppName string `json:"appName,omitempty"` Version string `json:"version,omitempty"` //gocd etag used for edit purpose diff --git a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go index 12c85ebd8c..b183c82cb3 100644 --- a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go +++ b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go @@ -17,7 +17,8 @@ import ( "github.com/devtron-labs/devtron/pkg/imageDigestPolicy" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" - bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" + pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/plugin" @@ -103,7 +104,7 @@ func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) err _, span = otel.Tracer("orchestrator").Start(ctx, "cdWorkflowService.SubmitWorkflow") cdStageWorkflowRequest.Pipeline = pipeline cdStageWorkflowRequest.Env = env - cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE + cdStageWorkflowRequest.Type = pipelineConfigBean.CD_WORKFLOW_PIPELINE_TYPE _, err = impl.cdWorkflowService.SubmitWorkflow(cdStageWorkflowRequest) span.End() err = impl.sendPreStageNotification(ctx, cdWf, pipeline) @@ -225,9 +226,9 @@ func (impl *TriggerServiceImpl) SetCopyContainerImagePluginDataInWorkflowRequest if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { var pipelineStageEntityType int if pipelineStage == types.PRE { - 
pipelineStageEntityType = bean3.EntityTypePreCD + pipelineStageEntityType = pipelineConfigBean.EntityTypePreCD } else { - pipelineStageEntityType = bean3.EntityTypePostCD + pipelineStageEntityType = pipelineConfigBean.EntityTypePostCD } customTagId := -1 var DockerImageTag string @@ -282,7 +283,7 @@ func (impl *TriggerServiceImpl) SetCopyContainerImagePluginDataInWorkflowRequest } if len(savedCIArtifacts) > 0 { // if already present in ci artifact, return "image path already in use error" - return imagePathReservationIds, bean3.ErrImagePathInUse + return imagePathReservationIds, pipelineConfigBean.ErrImagePathInUse } imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) if err != nil { @@ -336,7 +337,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow return nil, err } - var ciProjectDetails []bean3.CiProjectDetails + var ciProjectDetails []pipelineConfigBean.CiProjectDetails var ciPipeline *pipelineConfig.CiPipeline if cdPipeline.CiPipelineId > 0 { ciPipeline, err = impl.ciPipelineRepository.FindById(cdPipeline.CiPipelineId) @@ -365,7 +366,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow return nil, err } - ciProjectDetail := bean3.CiProjectDetails{ + ciProjectDetail := pipelineConfigBean.CiProjectDetails{ GitRepository: ciMaterialCurrent.Material.GitConfiguration.URL, MaterialName: gitMaterial.Name, CheckoutPath: gitMaterial.CheckoutPath, @@ -373,7 +374,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow SourceType: m.Type, SourceValue: m.Value, Type: string(m.Type), - GitOptions: bean3.GitOptions{ + GitOptions: pipelineConfigBean.GitOptions{ UserName: gitMaterial.GitProvider.UserName, Password: gitMaterial.GitProvider.Password, SshPrivateKey: gitMaterial.GitProvider.SshPrivateKey, @@ -392,7 +393,7 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow return nil, err } ciProjectDetail.CommitTime = commitTime.Format(bean4.LayoutRFC3339) - } else if ciPipeline.PipelineType == string(bean3.CI_JOB) { + } else if ciPipeline.PipelineType == string(CiPipeline.CI_JOB) { // This has been done to resolve unmarshalling issue in ci-runner, in case of no commit time(eg- polling container images) ciProjectDetail.CommitTime = time.Time{}.Format(bean4.LayoutRFC3339) } else { @@ -417,9 +418,9 @@ func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflow var deployStageWfr pipelineConfig.CdWorkflowRunner var deployStageTriggeredByUserEmail string var pipelineReleaseCounter int - var preDeploySteps []*bean3.StepObject - var postDeploySteps []*bean3.StepObject - var refPluginsData []*bean3.RefPluginObject + var preDeploySteps []*pipelineConfigBean.StepObject + var postDeploySteps []*pipelineConfigBean.StepObject + var refPluginsData []*pipelineConfigBean.RefPluginObject //if pipeline_stage_steps present for pre-CD or post-CD then no need to add stageYaml to cdWorkflowRequest in that //case add PreDeploySteps and PostDeploySteps to cdWorkflowRequest, this is done for backward compatibility pipelineStage, err := impl.pipelineStageService.GetCdStageByCdPipelineIdAndStageType(cdPipeline.Id, runner.WorkflowType.WorkflowTypeToStageType()) @@ -912,15 +913,15 @@ func (impl *TriggerServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, return imagePathReservationIds, nil } -func setExtraEnvVariableInDeployStep(deploySteps []*bean3.StepObject, extraEnvVariables map[string]string, webhookAndCiData 
*gitSensorClient.WebhookAndCiData) { +func setExtraEnvVariableInDeployStep(deploySteps []*pipelineConfigBean.StepObject, extraEnvVariables map[string]string, webhookAndCiData *gitSensorClient.WebhookAndCiData) { for _, deployStep := range deploySteps { for variableKey, variableValue := range extraEnvVariables { if isExtraVariableDynamic(variableKey, webhookAndCiData) && deployStep.StepType == "INLINE" { - extraInputVar := &bean3.VariableObject{ + extraInputVar := &pipelineConfigBean.VariableObject{ Name: variableKey, Format: "STRING", Value: variableValue, - VariableType: bean3.VARIABLE_TYPE_REF_GLOBAL, + VariableType: pipelineConfigBean.VARIABLE_TYPE_REF_GLOBAL, ReferenceVariableName: variableKey, } deployStep.InputVars = append(deployStep.InputVars, extraInputVar) diff --git a/pkg/pipeline/AppArtifactManager.go b/pkg/pipeline/AppArtifactManager.go index 994143bfe4..8a31e1326b 100644 --- a/pkg/pipeline/AppArtifactManager.go +++ b/pkg/pipeline/AppArtifactManager.go @@ -19,6 +19,7 @@ package pipeline import ( argoApplication "github.com/devtron-labs/devtron/client/argocdServer/bean" + pipelineBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" "sort" "strings" @@ -828,9 +829,9 @@ func (impl *AppArtifactManagerImpl) BuildArtifactsList(listingFilterOpts *bean.A return ciArtifacts, 0, "", totalCount, err } } else { - if listingFilterOpts.ParentStageType == WorklowTypePre { + if listingFilterOpts.ParentStageType == pipelineBean.WorkflowTypePre { listingFilterOpts.PluginStage = repository.PRE_CD - } else if listingFilterOpts.ParentStageType == WorklowTypePost { + } else if listingFilterOpts.ParentStageType == pipelineBean.WorkflowTypePost { listingFilterOpts.PluginStage = repository.POST_CD } ciArtifacts, totalCount, err = impl.BuildArtifactsForCdStageV2(listingFilterOpts) diff --git a/pkg/pipeline/BuildPipelineConfigService.go b/pkg/pipeline/BuildPipelineConfigService.go index 8303fc1db6..ce567efa0f 100644 --- a/pkg/pipeline/BuildPipelineConfigService.go +++ b/pkg/pipeline/BuildPipelineConfigService.go @@ -31,6 +31,7 @@ import ( "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/bean" pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/pipeline/types" resourceGroup2 "github.com/devtron-labs/devtron/pkg/resourceGroup" @@ -570,7 +571,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipeline(appId int) (ciConfig *bea AfterDockerBuildScripts: afterDockerBuildScripts, ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, - PipelineType: pipelineConfigBean.PipelineType(pipeline.PipelineType), + PipelineType: CiPipeline.PipelineType(pipeline.PipelineType), } ciEnvMapping, err := impl.ciPipelineRepository.FindCiEnvMappingByCiPipelineId(pipeline.Id) if err != nil && err != pg.ErrNoRows { @@ -714,7 +715,7 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineById(pipelineId int) (ciPi AfterDockerBuildScripts: afterDockerBuildScripts, ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, - PipelineType: pipelineConfigBean.PipelineType(pipeline.PipelineType), + PipelineType: CiPipeline.PipelineType(pipeline.PipelineType), } customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pipelineConfigBean.EntityTypeCiPipelineId, strconv.Itoa(pipeline.Id)) if err != nil && err != pg.ErrNoRows { 
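// ---------------------------------------------------------------------------------------------
// Illustrative sketch (an assumption, not taken verbatim from this patch): the hunks in
// BuildPipelineConfigService.go swap pipelineConfigBean.PipelineType for CiPipeline.PipelineType,
// i.e. the pipeline-type enum and its constants are assumed to have moved to
// pkg/pipeline/bean/CiPipeline. Assuming the relocated definitions keep the string-backed shape
// implied by the string(...) conversions used throughout this series, they would look roughly like:
//
//	package CiPipeline
//
//	type PipelineType string
//
//	const (
//		NORMAL    PipelineType = "NORMAL"
//		LINKED    PipelineType = "LINKED"
//		EXTERNAL  PipelineType = "EXTERNAL"
//		CI_JOB    PipelineType = "CI_JOB"
//		LINKED_CD PipelineType = "LINKED_CD"
//	)
//
// Callers then convert the persisted column with CiPipeline.PipelineType(pipeline.PipelineType)
// and compare against string(CiPipeline.CI_JOB), as the surrounding hunks do.
// ---------------------------------------------------------------------------------------------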
@@ -838,7 +839,7 @@ func (impl *CiPipelineConfigServiceImpl) GetTriggerViewCiPipeline(appId int) (*b ParentCiPipeline: pipeline.ParentCiPipeline, ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, - PipelineType: pipelineConfigBean.PipelineType(pipeline.PipelineType), + PipelineType: CiPipeline.PipelineType(pipeline.PipelineType), } if ciTemplateBean, ok := ciOverrideTemplateMap[pipeline.Id]; ok { templateOverride := ciTemplateBean.CiTemplateOverride @@ -1204,7 +1205,7 @@ func (impl *CiPipelineConfigServiceImpl) UpdateCiTemplate(updateRequest *bean.Ci } for _, ciTemplateOverride := range ciTemplateOverrides { if _, ok := ciPipelineIdsMap[ciTemplateOverride.CiPipelineId]; ok { - if ciPipelineIdsMap[ciTemplateOverride.CiPipelineId].PipelineType == string(pipelineConfigBean.CI_JOB) { + if ciPipelineIdsMap[ciTemplateOverride.CiPipelineId].PipelineType == string(CiPipeline.CI_JOB) { ciTemplateOverride.DockerRepository = updateRequest.DockerRepository ciTemplateOverride.DockerRegistryId = updateRequest.DockerRegistry _, err = impl.ciTemplateOverrideRepository.Update(ciTemplateOverride) @@ -1236,7 +1237,7 @@ func (impl *CiPipelineConfigServiceImpl) handlePipelineCreate(request *bean.CiPa if pipelineExists { err = &utils.ApiError{Code: "400", HttpStatusCode: 400, UserMessage: "pipeline name already exist"} impl.logger.Errorw("pipeline name already exist", "err", err, "patch cipipeline name", request.CiPipeline.Name) - return nil, fmt.Errorf(pipelineConfigBean.PIPELINE_NAME_ALREADY_EXISTS_ERROR) + return nil, fmt.Errorf(CiPipeline.PIPELINE_NAME_ALREADY_EXISTS_ERROR) } if request.IsSwitchCiPipelineRequest() { @@ -1260,17 +1261,17 @@ func (impl *CiPipelineConfigServiceImpl) PatchCiPipeline(request *bean.CiPatchRe impl.logger.Errorw("err in fetching template for pipeline patch, ", "err", err, "appId", request.AppId) return nil, err } - if request.CiPipeline.PipelineType == pipelineConfigBean.CI_JOB { + if request.CiPipeline.PipelineType == CiPipeline.CI_JOB { request.CiPipeline.IsDockerConfigOverridden = true request.CiPipeline.DockerConfigOverride = bean.DockerConfigOverride{ DockerRegistry: ciConfig.DockerRegistry, DockerRepository: ciConfig.DockerRepository, - CiBuildConfig: &pipelineConfigBean.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 0, GitMaterialId: request.CiPipeline.CiMaterial[0].GitMaterialId, BuildContextGitMaterialId: request.CiPipeline.CiMaterial[0].GitMaterialId, UseRootBuildContext: false, - CiBuildType: pipelineConfigBean.SKIP_BUILD_TYPE, + CiBuildType: CiPipeline.SKIP_BUILD_TYPE, DockerBuildConfig: nil, BuildPackConfig: nil, }, @@ -1453,15 +1454,15 @@ func (impl *CiPipelineConfigServiceImpl) GetCiPipelineMin(appId int, envIds []in var ciPipelineResp []*bean.CiPipelineMin for _, pipeline := range pipelines { parentCiPipeline := pipelineConfig.CiPipeline{} - pipelineType := pipelineConfigBean.NORMAL + pipelineType := CiPipeline.NORMAL if pipelineParentCiMap[pipeline.Id] != nil { parentCiPipeline = *pipelineParentCiMap[pipeline.Id] - pipelineType = pipelineConfigBean.LINKED + pipelineType = CiPipeline.LINKED } else if pipeline.IsExternal == true { - pipelineType = pipelineConfigBean.EXTERNAL - } else if pipeline.PipelineType == string(pipelineConfigBean.CI_JOB) { - pipelineType = pipelineConfigBean.CI_JOB + pipelineType = CiPipeline.EXTERNAL + } else if pipeline.PipelineType == string(CiPipeline.CI_JOB) { + pipelineType = CiPipeline.CI_JOB } ciPipeline := &bean.CiPipelineMin{ @@ -1678,7 +1679,7 @@ func (impl 
*CiPipelineConfigServiceImpl) GetCiPipelineByEnvironment(request reso ExternalCiConfig: externalCiConfig, ScanEnabled: pipeline.ScanEnabled, IsDockerConfigOverridden: pipeline.IsDockerConfigOverridden, - PipelineType: pipelineConfigBean.PipelineType(pipeline.PipelineType), + PipelineType: CiPipeline.PipelineType(pipeline.PipelineType), } parentPipelineAppId, ok := pipelineIdVsAppId[parentCiPipelineId] if ok { diff --git a/pkg/pipeline/BuildPipelineSwitchService.go b/pkg/pipeline/BuildPipelineSwitchService.go index e325403e7b..c84b872e5f 100644 --- a/pkg/pipeline/BuildPipelineSwitchService.go +++ b/pkg/pipeline/BuildPipelineSwitchService.go @@ -5,7 +5,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/bean" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" - pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" + pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/history" repository4 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/sql" diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index 856100c032..ce0146d82a 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -37,7 +37,7 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/cluster" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" - bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" + pipelineBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/pipeline/executors" "github.com/devtron-labs/devtron/pkg/pipeline/types" resourceGroup2 "github.com/devtron-labs/devtron/pkg/resourceGroup" @@ -127,12 +127,6 @@ func NewCdHandlerImpl(Logger *zap.SugaredLogger, userService user.UserService, return cdh } -const NotTriggered string = "Not Triggered" -const NotDeployed = "Not Deployed" -const WorklowTypeDeploy = "DEPLOY" -const WorklowTypePre = "PRE" -const WorklowTypePost = "POST" - func (impl *CdHandlerImpl) CancelStage(workflowRunnerId int, userId int32) (int, error) { workflowRunner, err := impl.cdWorkflowRepository.FindWorkflowRunnerById(workflowRunnerId) if err != nil { @@ -274,7 +268,7 @@ func (impl *CdHandlerImpl) extractWorkfowStatus(workflowStatus v1alpha1.Workflow podName := "" for k, v := range workflowStatus.Nodes { impl.Logger.Debugw("ExtractWorkflowStatus", "workflowName", k, "v", v) - if v.TemplateName == bean2.CD_WORKFLOW_NAME { + if v.TemplateName == pipelineBean.CD_WORKFLOW_NAME { if v.BoundaryID == "" { workflowName = k } else { @@ -855,11 +849,11 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerView(appId int) ([]*p cdWorkflowStatus := &pipelineConfig.CdWorkflowStatus{} cdWorkflowStatus.PipelineId = item.PipelineId cdWorkflowStatus.CiPipelineId = item.CiPipelineId - if item.WorkflowType == WorklowTypePre { + if item.WorkflowType == pipelineBean.WorkflowTypePre { cdWorkflowStatus.PreStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypeDeploy { + } else if item.WorkflowType == pipelineBean.WorkflowTypeDeploy { cdWorkflowStatus.DeployStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypePost { + } else if item.WorkflowType == pipelineBean.WorkflowTypePost { cdWorkflowStatus.PostStatus = statusMap[item.WfrId] } cdMap[item.PipelineId] = cdWorkflowStatus @@ -867,11 +861,11 @@ func (impl *CdHandlerImpl) 
FetchAppWorkflowStatusForTriggerView(appId int) ([]*p cdWorkflowStatus := cdMap[item.PipelineId] cdWorkflowStatus.PipelineId = item.PipelineId cdWorkflowStatus.CiPipelineId = item.CiPipelineId - if item.WorkflowType == WorklowTypePre { + if item.WorkflowType == pipelineBean.WorkflowTypePre { cdWorkflowStatus.PreStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypeDeploy { + } else if item.WorkflowType == pipelineBean.WorkflowTypeDeploy { cdWorkflowStatus.DeployStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypePost { + } else if item.WorkflowType == pipelineBean.WorkflowTypePost { cdWorkflowStatus.PostStatus = statusMap[item.WfrId] } cdMap[item.PipelineId] = cdWorkflowStatus @@ -881,13 +875,13 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerView(appId int) ([]*p for _, item := range cdMap { if item.PreStatus == "" { - item.PreStatus = NotTriggered + item.PreStatus = pipelineBean.NotTriggered } if item.DeployStatus == "" { - item.DeployStatus = NotDeployed + item.DeployStatus = pipelineBean.NotDeployed } if item.PostStatus == "" { - item.PostStatus = NotTriggered + item.PostStatus = pipelineBean.NotTriggered } cdWorkflowStatus = append(cdWorkflowStatus, item) } @@ -896,9 +890,9 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerView(appId int) ([]*p for _, item := range pipelineIds { cdWs := &pipelineConfig.CdWorkflowStatus{} cdWs.PipelineId = item - cdWs.PreStatus = NotTriggered - cdWs.DeployStatus = NotDeployed - cdWs.PostStatus = NotTriggered + cdWs.PreStatus = pipelineBean.NotTriggered + cdWs.DeployStatus = pipelineBean.NotDeployed + cdWs.PostStatus = pipelineBean.NotTriggered cdWorkflowStatus = append(cdWorkflowStatus, cdWs) } } else { @@ -906,9 +900,9 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerView(appId int) ([]*p if _, ok := cdMap[item]; !ok { cdWs := &pipelineConfig.CdWorkflowStatus{} cdWs.PipelineId = item - cdWs.PreStatus = NotTriggered - cdWs.DeployStatus = NotDeployed - cdWs.PostStatus = NotTriggered + cdWs.PreStatus = pipelineBean.NotTriggered + cdWs.DeployStatus = pipelineBean.NotDeployed + cdWs.PostStatus = pipelineBean.NotTriggered cdWorkflowStatus = append(cdWorkflowStatus, cdWs) } } @@ -1007,11 +1001,11 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerViewForEnvironment(re cdWorkflowStatus := &pipelineConfig.CdWorkflowStatus{} cdWorkflowStatus.PipelineId = item.PipelineId cdWorkflowStatus.CiPipelineId = item.CiPipelineId - if item.WorkflowType == WorklowTypePre { + if item.WorkflowType == pipelineBean.WorkflowTypePre { cdWorkflowStatus.PreStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypeDeploy { + } else if item.WorkflowType == pipelineBean.WorkflowTypeDeploy { cdWorkflowStatus.DeployStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypePost { + } else if item.WorkflowType == pipelineBean.WorkflowTypePost { cdWorkflowStatus.PostStatus = statusMap[item.WfrId] } cdMap[item.PipelineId] = cdWorkflowStatus @@ -1019,11 +1013,11 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerViewForEnvironment(re cdWorkflowStatus := cdMap[item.PipelineId] cdWorkflowStatus.PipelineId = item.PipelineId cdWorkflowStatus.CiPipelineId = item.CiPipelineId - if item.WorkflowType == WorklowTypePre { + if item.WorkflowType == pipelineBean.WorkflowTypePre { cdWorkflowStatus.PreStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypeDeploy { + } else if item.WorkflowType == pipelineBean.WorkflowTypeDeploy { 
cdWorkflowStatus.DeployStatus = statusMap[item.WfrId] - } else if item.WorkflowType == WorklowTypePost { + } else if item.WorkflowType == pipelineBean.WorkflowTypePost { cdWorkflowStatus.PostStatus = statusMap[item.WfrId] } cdMap[item.PipelineId] = cdWorkflowStatus @@ -1032,13 +1026,13 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerViewForEnvironment(re for _, item := range cdMap { if item.PreStatus == "" { - item.PreStatus = NotTriggered + item.PreStatus = pipelineBean.NotTriggered } if item.DeployStatus == "" { - item.DeployStatus = NotDeployed + item.DeployStatus = pipelineBean.NotDeployed } if item.PostStatus == "" { - item.PostStatus = NotTriggered + item.PostStatus = pipelineBean.NotTriggered } cdWorkflowStatus = append(cdWorkflowStatus, item) } @@ -1047,9 +1041,9 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerViewForEnvironment(re for _, item := range pipelineIds { cdWs := &pipelineConfig.CdWorkflowStatus{} cdWs.PipelineId = item - cdWs.PreStatus = NotTriggered - cdWs.DeployStatus = NotDeployed - cdWs.PostStatus = NotTriggered + cdWs.PreStatus = pipelineBean.NotTriggered + cdWs.DeployStatus = pipelineBean.NotDeployed + cdWs.PostStatus = pipelineBean.NotTriggered cdWorkflowStatus = append(cdWorkflowStatus, cdWs) } } else { @@ -1057,9 +1051,9 @@ func (impl *CdHandlerImpl) FetchAppWorkflowStatusForTriggerViewForEnvironment(re if _, ok := cdMap[item]; !ok { cdWs := &pipelineConfig.CdWorkflowStatus{} cdWs.PipelineId = item - cdWs.PreStatus = NotTriggered - cdWs.DeployStatus = NotDeployed - cdWs.PostStatus = NotTriggered + cdWs.PreStatus = pipelineBean.NotTriggered + cdWs.DeployStatus = pipelineBean.NotDeployed + cdWs.PostStatus = pipelineBean.NotTriggered cdWorkflowStatus = append(cdWorkflowStatus, cdWs) } } @@ -1146,7 +1140,7 @@ func (impl *CdHandlerImpl) FetchAppDeploymentStatusForEnvironments(request resou } for _, item := range wfrList { if item.Status == "" { - statusMap[item.Id] = NotDeployed + statusMap[item.Id] = pipelineBean.NotDeployed } else { statusMap[item.Id] = item.Status } @@ -1157,7 +1151,7 @@ func (impl *CdHandlerImpl) FetchAppDeploymentStatusForEnvironments(request resou if _, ok := deploymentStatusesMap[item.PipelineId]; !ok { deploymentStatus := &pipelineConfig.AppDeploymentStatus{} deploymentStatus.PipelineId = item.PipelineId - if item.WorkflowType == WorklowTypeDeploy { + if item.WorkflowType == pipelineBean.WorkflowTypeDeploy { deploymentStatus.DeployStatus = statusMap[item.WfrId] deploymentStatus.AppId = pipelineAppMap[deploymentStatus.PipelineId] deploymentStatusesMap[item.PipelineId] = deploymentStatus @@ -1169,7 +1163,7 @@ func (impl *CdHandlerImpl) FetchAppDeploymentStatusForEnvironments(request resou if _, ok := deploymentStatusesMap[pipelineId]; !ok { deploymentStatus := &pipelineConfig.AppDeploymentStatus{} deploymentStatus.PipelineId = pipelineId - deploymentStatus.DeployStatus = NotDeployed + deploymentStatus.DeployStatus = pipelineBean.NotDeployed deploymentStatus.AppId = pipelineAppMap[deploymentStatus.PipelineId] deploymentStatusesMap[pipelineId] = deploymentStatus } diff --git a/pkg/pipeline/CiBuildConfigService.go b/pkg/pipeline/CiBuildConfigService.go index f09ff7e98c..c10b50d35c 100644 --- a/pkg/pipeline/CiBuildConfigService.go +++ b/pkg/pipeline/CiBuildConfigService.go @@ -4,16 +4,16 @@ import ( "errors" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" - "github.com/devtron-labs/devtron/pkg/pipeline/bean" + 
"github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "go.uber.org/zap" "time" ) type CiBuildConfigService interface { - Save(templateId int, overrideTemplateId int, ciBuildConfigBean *bean.CiBuildConfigBean, userId int32) error - UpdateOrSave(templateId int, overrideTemplateId int, ciBuildConfig *bean.CiBuildConfigBean, userId int32) (*bean.CiBuildConfigBean, error) + Save(templateId int, overrideTemplateId int, ciBuildConfigBean *CiPipeline.CiBuildConfigBean, userId int32) error + UpdateOrSave(templateId int, overrideTemplateId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) (*CiPipeline.CiBuildConfigBean, error) Delete(ciBuildConfigId int) error - GetCountByBuildType() map[bean.CiBuildType]int + GetCountByBuildType() map[CiPipeline.CiBuildType]int } type CiBuildConfigServiceImpl struct { @@ -28,7 +28,7 @@ func NewCiBuildConfigServiceImpl(logger *zap.SugaredLogger, ciBuildConfigReposit } } -func (impl *CiBuildConfigServiceImpl) Save(templateId int, overrideTemplateId int, ciBuildConfigBean *bean.CiBuildConfigBean, userId int32) error { +func (impl *CiBuildConfigServiceImpl) Save(templateId int, overrideTemplateId int, ciBuildConfigBean *CiPipeline.CiBuildConfigBean, userId int32) error { ciBuildConfigEntity, err := adapter.ConvertBuildConfigBeanToDbEntity(templateId, overrideTemplateId, ciBuildConfigBean, userId) if err != nil { impl.Logger.Errorw("error occurred while converting build config to db entity", "templateId", templateId, @@ -46,7 +46,7 @@ func (impl *CiBuildConfigServiceImpl) Save(templateId int, overrideTemplateId in return nil } -func (impl *CiBuildConfigServiceImpl) UpdateOrSave(templateId int, overrideTemplateId int, ciBuildConfig *bean.CiBuildConfigBean, userId int32) (*bean.CiBuildConfigBean, error) { +func (impl *CiBuildConfigServiceImpl) UpdateOrSave(templateId int, overrideTemplateId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) (*CiPipeline.CiBuildConfigBean, error) { if ciBuildConfig == nil { impl.Logger.Warnw("not updating build config as object is empty", "ciBuildConfig", ciBuildConfig) return nil, nil @@ -76,14 +76,14 @@ func (impl *CiBuildConfigServiceImpl) Delete(ciBuildConfigId int) error { return impl.CiBuildConfigRepository.Delete(ciBuildConfigId) } -func (impl *CiBuildConfigServiceImpl) GetCountByBuildType() map[bean.CiBuildType]int { - result := make(map[bean.CiBuildType]int) +func (impl *CiBuildConfigServiceImpl) GetCountByBuildType() map[CiPipeline.CiBuildType]int { + result := make(map[CiPipeline.CiBuildType]int) buildTypeVsCount, err := impl.CiBuildConfigRepository.GetCountByBuildType() if err != nil { return result } for buildType, count := range buildTypeVsCount { - result[bean.CiBuildType(buildType)] = count + result[CiPipeline.CiBuildType(buildType)] = count } return result } diff --git a/pkg/pipeline/CiCdPipelineOrchestrator.go b/pkg/pipeline/CiCdPipelineOrchestrator.go index 964fd688d4..b0eed4b095 100644 --- a/pkg/pipeline/CiCdPipelineOrchestrator.go +++ b/pkg/pipeline/CiCdPipelineOrchestrator.go @@ -26,14 +26,20 @@ import ( "encoding/json" "errors" "fmt" + "golang.org/x/exp/slices" "path" "regexp" "strconv" "strings" "time" + "github.com/devtron-labs/devtron/pkg/pipeline/adapter" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" + "github.com/devtron-labs/devtron/util/response/pagination" + "go.opentelemetry.io/otel" + util3 "github.com/devtron-labs/common-lib/utils/k8s" - bean4 "github.com/devtron-labs/devtron/api/bean" + apiBean "github.com/devtron-labs/devtron/api/bean" 
"github.com/devtron-labs/devtron/client/gitSensor" app2 "github.com/devtron-labs/devtron/internal/sql/repository/app" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" @@ -81,7 +87,7 @@ type CiCdPipelineOrchestrator interface { PatchMaterialValue(createRequest *bean.CiPipeline, userId int32, oldPipeline *pipelineConfig.CiPipeline) (*bean.CiPipeline, error) PatchCiMaterialSource(ciPipeline *bean.CiMaterialPatchRequest, userId int32) (*bean.CiMaterialPatchRequest, error) PatchCiMaterialSourceValue(patchRequest *bean.CiMaterialValuePatchRequest, userId int32, value string, token string, checkAppSpecificAccess func(token, action string, appId int) (bool, error)) (*pipelineConfig.CiPipelineMaterial, error) - CreateCiTemplateBean(ciPipelineId int, dockerRegistryId string, dockerRepository string, gitMaterialId int, ciBuildConfig *pipelineConfigBean.CiBuildConfigBean, userId int32) pipelineConfigBean.CiTemplateBean + CreateCiTemplateBean(ciPipelineId int, dockerRegistryId string, dockerRepository string, gitMaterialId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) pipelineConfigBean.CiTemplateBean UpdateCiPipelineMaterials(materialsUpdate []*pipelineConfig.CiPipelineMaterial) error PipelineExists(name string) (bool, error) GetCdPipelinesForApp(appId int) (cdPipelines *bean.CdPipelines, err error) @@ -93,6 +99,8 @@ type CiCdPipelineOrchestrator interface { CreateEcrRepo(dockerRepository, AWSRegion, AWSAccessKeyId, AWSSecretAccessKey string) error GetCdPipelinesForEnv(envId int, requestedAppIds []int) (cdPipelines *bean.CdPipelines, err error) AddPipelineToTemplate(createRequest *bean.CiConfigRequest, isSwitchCiPipelineRequest bool) (resp *bean.CiConfigRequest, err error) + GetSourceCiDownStreamFilters(ctx context.Context, sourceCiPipelineId int) (*CiPipeline.SourceCiDownStreamEnv, error) + GetSourceCiDownStreamInfo(ctx context.Context, sourceCIPipeline int, req *CiPipeline.SourceCiDownStreamFilters) (pagination.PaginatedResponse[CiPipeline.SourceCiDownStreamResponse], error) } type CiCdPipelineOrchestratorImpl struct { @@ -102,19 +110,17 @@ type CiCdPipelineOrchestratorImpl struct { pipelineRepository pipelineConfig.PipelineRepository ciPipelineRepository pipelineConfig.CiPipelineRepository ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository GitSensorClient gitSensor.Client ciConfig *types.CiCdConfig appWorkflowRepository appWorkflow.AppWorkflowRepository envRepository repository2.EnvironmentRepository attributesService attributes.AttributesService - appListingRepository repository.AppListingRepository appLabelsService app.AppCrudOperationService userAuthService user.UserAuthService prePostCdScriptHistoryService history3.PrePostCdScriptHistoryService - prePostCiScriptHistoryService history3.PrePostCiScriptHistoryService pipelineStageService PipelineStageService ciTemplateService CiTemplateService - ciTemplateOverrideRepository pipelineConfig.CiTemplateOverrideRepository gitMaterialHistoryService history3.GitMaterialHistoryService ciPipelineHistoryService history3.CiPipelineHistoryService dockerArtifactStoreRepository dockerRegistryRepository.DockerArtifactStoreRepository @@ -132,17 +138,15 @@ func NewCiCdPipelineOrchestrator( pipelineRepository pipelineConfig.PipelineRepository, ciPipelineRepository pipelineConfig.CiPipelineRepository, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, + cdWorkflowRepository 
pipelineConfig.CdWorkflowRepository, GitSensorClient gitSensor.Client, ciConfig *types.CiCdConfig, appWorkflowRepository appWorkflow.AppWorkflowRepository, envRepository repository2.EnvironmentRepository, attributesService attributes.AttributesService, - appListingRepository repository.AppListingRepository, appLabelsService app.AppCrudOperationService, userAuthService user.UserAuthService, prePostCdScriptHistoryService history3.PrePostCdScriptHistoryService, - prePostCiScriptHistoryService history3.PrePostCiScriptHistoryService, pipelineStageService PipelineStageService, - ciTemplateOverrideRepository pipelineConfig.CiTemplateOverrideRepository, gitMaterialHistoryService history3.GitMaterialHistoryService, ciPipelineHistoryService history3.CiPipelineHistoryService, ciTemplateService CiTemplateService, @@ -159,18 +163,16 @@ func NewCiCdPipelineOrchestrator( pipelineRepository: pipelineRepository, ciPipelineRepository: ciPipelineRepository, ciPipelineMaterialRepository: ciPipelineMaterialRepository, + cdWorkflowRepository: cdWorkflowRepository, GitSensorClient: GitSensorClient, ciConfig: ciConfig, appWorkflowRepository: appWorkflowRepository, envRepository: envRepository, attributesService: attributesService, - appListingRepository: appListingRepository, appLabelsService: appLabelsService, userAuthService: userAuthService, prePostCdScriptHistoryService: prePostCdScriptHistoryService, - prePostCiScriptHistoryService: prePostCiScriptHistoryService, pipelineStageService: pipelineStageService, - ciTemplateOverrideRepository: ciTemplateOverrideRepository, gitMaterialHistoryService: gitMaterialHistoryService, ciPipelineHistoryService: ciPipelineHistoryService, ciTemplateService: ciTemplateService, @@ -286,7 +288,7 @@ func (impl CiCdPipelineOrchestratorImpl) validateCiPipelineMaterial(ciPipelineMa func (impl CiCdPipelineOrchestratorImpl) getSkipMessage(ciPipeline *pipelineConfig.CiPipeline) string { switch ciPipeline.PipelineType { - case string(pipelineConfigBean.LINKED_CD): + case string(CiPipeline.LINKED_CD): return "“Sync with Environmentâ€" default: return "“Linked Build Pipelineâ€" @@ -370,7 +372,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. //If customTagObject has been passed, create or update the resource //Otherwise deleteIfExists if createRequest.CustomTagObject != nil && len(createRequest.CustomTagObject.TagPattern) > 0 { - customTag := bean4.CustomTag{ + customTag := apiBean.CustomTag{ EntityKey: pipelineConfigBean.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), TagPattern: createRequest.CustomTagObject.TagPattern, @@ -382,7 +384,7 @@ func (impl CiCdPipelineOrchestratorImpl) PatchMaterialValue(createRequest *bean. 
return nil, err } } else { - customTag := bean4.CustomTag{ + customTag := apiBean.CustomTag{ EntityKey: pipelineConfigBean.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipelineObject.Id), Enabled: false, @@ -740,7 +742,7 @@ func (impl CiCdPipelineOrchestratorImpl) DeleteCiPipelineAndCiEnvMappings(tx *pg return err } -func (impl CiCdPipelineOrchestratorImpl) CreateCiTemplateBean(ciPipelineId int, dockerRegistryId string, dockerRepository string, gitMaterialId int, ciBuildConfig *pipelineConfigBean.CiBuildConfigBean, userId int32) pipelineConfigBean.CiTemplateBean { +func (impl CiCdPipelineOrchestratorImpl) CreateCiTemplateBean(ciPipelineId int, dockerRegistryId string, dockerRepository string, gitMaterialId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) pipelineConfigBean.CiTemplateBean { CiTemplateBean := pipelineConfigBean.CiTemplateBean{ CiTemplate: nil, CiTemplateOverride: &pipelineConfig.CiTemplateOverride{ @@ -767,7 +769,7 @@ func (impl CiCdPipelineOrchestratorImpl) SaveHistoryOfBaseTemplate(userId int32, CiTemplateBean := pipelineConfigBean.CiTemplateBean{ CiTemplate: nil, CiTemplateOverride: &pipelineConfig.CiTemplateOverride{}, - CiBuildConfig: &pipelineConfigBean.CiBuildConfigBean{}, + CiBuildConfig: &CiPipeline.CiBuildConfigBean{}, UserId: userId, } err := impl.ciPipelineHistoryService.SaveHistory(pipeline, materials, &CiTemplateBean, repository4.TRIGGER_DELETE) @@ -837,7 +839,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf //If customTagObejct has been passed, save it if !ciPipeline.EnableCustomTag { - err := impl.customTagService.DisableCustomTagIfExist(bean4.CustomTag{ + err := impl.customTagService.DisableCustomTagIfExist(apiBean.CustomTag{ EntityKey: pipelineConfigBean.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipeline.Id), }) @@ -845,7 +847,7 @@ func (impl CiCdPipelineOrchestratorImpl) CreateCiConf(createRequest *bean.CiConf return nil, err } } else if ciPipeline.CustomTagObject != nil && len(ciPipeline.CustomTagObject.TagPattern) != 0 { - customTag := &bean4.CustomTag{ + customTag := &apiBean.CustomTag{ EntityKey: pipelineConfigBean.EntityTypeCiPipelineId, EntityValue: strconv.Itoa(ciPipeline.Id), TagPattern: ciPipeline.CustomTagObject.TagPattern, @@ -1389,7 +1391,7 @@ func (impl CiCdPipelineOrchestratorImpl) createAppGroup(name, description string displayName := name appName := name if appType == helper.Job { - appName = name + "-" + util2.Generate(8) + "J" + pipelineConfigBean.UniquePlaceHolderForAppName + appName = name + "-" + util2.Generate(8) + "J" + CiPipeline.UniquePlaceHolderForAppName } pg := &app2.App{ Active: true, @@ -2104,3 +2106,99 @@ func (impl CiCdPipelineOrchestratorImpl) AddPipelineToTemplate(createRequest *be } return createRequest, err } + +func (impl CiCdPipelineOrchestratorImpl) GetSourceCiDownStreamFilters(ctx context.Context, sourceCiPipelineId int) (*CiPipeline.SourceCiDownStreamEnv, error) { + ctx, span := otel.Tracer("orchestrator").Start(ctx, "GetSourceCiDownStreamFilters") + defer span.End() + linkedCiPipelines, err := impl.ciPipelineRepository.GetLinkedCiPipelines(ctx, sourceCiPipelineId) + if err != nil { + impl.logger.Errorw("error in getting linked Ci pipelines for given source Ci pipeline Id ", "sourceCiPipelineId", sourceCiPipelineId, "err", err) + return &CiPipeline.SourceCiDownStreamEnv{ + EnvNames: []string{}, + }, err + } + envNames, err := impl.getAttachedEnvNamesByCiIds(ctx, linkedCiPipelines) + if err != nil { + impl.logger.Errorw("error in fetching 
environment names for linked Ci pipelines", "linkedCiPipelines", linkedCiPipelines, "err", err) + return &CiPipeline.SourceCiDownStreamEnv{ + EnvNames: []string{}, + }, err + } + res := &CiPipeline.SourceCiDownStreamEnv{ + EnvNames: envNames, + } + return res, nil +} + +func (impl CiCdPipelineOrchestratorImpl) getAttachedEnvNamesByCiIds(ctx context.Context, linkedCiPipelines []*pipelineConfig.CiPipeline) ([]string, error) { + ctx, span := otel.Tracer("orchestrator").Start(ctx, "getAttachedEnvNamesByCiIds") + defer span.End() + var ciPipelineIds []int + for _, ciPipeline := range linkedCiPipelines { + ciPipelineIds = append(ciPipelineIds, ciPipeline.Id) + } + pipelines, err := impl.pipelineRepository.FindWithEnvironmentByCiIds(ctx, ciPipelineIds) + if util.IsErrNoRows(err) { + impl.logger.Info("no pipelines available for these ciPipelineIds", "ciPipelineIds", ciPipelineIds) + return []string{}, nil + } else if err != nil { + impl.logger.Errorw("error in getting pipelines for these ciPipelineIds ", "ciPipelineIds", ciPipelineIds, "err", err) + return nil, err + } + if pipelines == nil { + impl.logger.Info("no pipelines available for these ciPipelineIds", "ciPipelineIds", ciPipelineIds) + return []string{}, nil + } + var envNames []string + for _, pipeline := range pipelines { + if !slices.Contains(envNames, pipeline.Environment.Name) { + envNames = append(envNames, pipeline.Environment.Name) + } + } + return envNames, nil +} + +func (impl CiCdPipelineOrchestratorImpl) GetSourceCiDownStreamInfo(ctx context.Context, sourceCIPipeline int, req *CiPipeline.SourceCiDownStreamFilters) (pagination.PaginatedResponse[CiPipeline.SourceCiDownStreamResponse], error) { + ctx, span := otel.Tracer("orchestrator").Start(ctx, "GetSourceCiDownStreamInfo") + defer span.End() + response := pagination.NewPaginatedResponse[CiPipeline.SourceCiDownStreamResponse]() + queryReq := &pagination.RepositoryRequest{ + Order: req.SortOrder, + SortBy: req.SortBy, + Limit: req.Size, + Offset: req.Offset, + } + linkedCIDetails, totalCount, err := impl.ciPipelineRepository.GetDownStreamInfo(ctx, sourceCIPipeline, req.SearchKey, req.EnvName, queryReq) + if util.IsErrNoRows(err) { + impl.logger.Info("no linked ci pipelines available", "SourceCIPipeline", sourceCIPipeline) + return response, nil + } else if err != nil { + impl.logger.Errorw("error in getting linked ci pipelines", "SourceCIPipeline", sourceCIPipeline, "err", err) + return response, err + } + response.UpdateTotalCount(totalCount) + response.UpdateOffset(req.Offset) + response.UpdateSize(req.Size) + + var pipelineIds []int + for _, item := range linkedCIDetails { + if item.PipelineId != 0 { + pipelineIds = append(pipelineIds, item.PipelineId) + } + } + + latestWfrs, err := impl.cdWorkflowRepository.FindLatestRunnerByPipelineIdsAndRunnerType(ctx, pipelineIds, apiBean.CD_WORKFLOW_TYPE_DEPLOY) + if util.IsErrNoRows(err) { + impl.logger.Info("no deployments have been triggered yet", "pipelineIds", pipelineIds) + // update the response with the pipelineConfig.LinkedCIDetails + data := adapter.GetSourceCiDownStreamResponse(linkedCIDetails) + response.PushData(data...) + return response, nil + } else if err != nil { + impl.logger.Errorw("error in getting last deployment status", "pipelineIds", pipelineIds, "err", err) + return response, err + } + data := adapter.GetSourceCiDownStreamResponse(linkedCIDetails, latestWfrs...) + response.PushData(data...) 
+ return response, nil +} diff --git a/pkg/pipeline/CiLogService.go b/pkg/pipeline/CiLogService.go index 2fd2c7d9e3..dd2a886344 100644 --- a/pkg/pipeline/CiLogService.go +++ b/pkg/pipeline/CiLogService.go @@ -21,7 +21,7 @@ import ( "context" blob_storage "github.com/devtron-labs/common-lib/blob-storage" "github.com/devtron-labs/common-lib/utils/k8s" - "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/types" "go.uber.org/zap" "io" @@ -67,7 +67,7 @@ func (impl *CiLogServiceImpl) FetchRunningWorkflowLogs(ciLogRequest types.BuildL return nil, nil, err } } - req := impl.k8sUtil.GetLogsForAPod(kubeClient, ciLogRequest.Namespace, ciLogRequest.PodName, bean.Main, true) + req := impl.k8sUtil.GetLogsForAPod(kubeClient, ciLogRequest.Namespace, ciLogRequest.PodName, CiPipeline.Main, true) podLogs, err := req.Stream(context.Background()) if podLogs == nil || err != nil { impl.logger.Errorw("error in opening stream", "name", ciLogRequest.PodName) diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index ce5e75164b..3a765f7837 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -23,6 +23,7 @@ import ( "fmt" "github.com/devtron-labs/devtron/pkg/infraConfig" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/infraProviders" "path/filepath" "strconv" @@ -37,7 +38,6 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" repository1 "github.com/devtron-labs/devtron/pkg/cluster/repository" pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" - "github.com/devtron-labs/devtron/pkg/pipeline/history" "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/plugin" @@ -65,34 +65,32 @@ type CiService interface { } type CiServiceImpl struct { - Logger *zap.SugaredLogger - workflowService WorkflowService - ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository - ciWorkflowRepository pipelineConfig.CiWorkflowRepository - eventClient client.EventClient - eventFactory client.EventFactory - mergeUtil *util.MergeUtil - ciPipelineRepository pipelineConfig.CiPipelineRepository - prePostCiScriptHistoryService history.PrePostCiScriptHistoryService - pipelineStageService PipelineStageService - userService user.UserService - ciTemplateService CiTemplateService - appCrudOperationService app.AppCrudOperationService - envRepository repository1.EnvironmentRepository - appRepository appRepository.AppRepository - customTagService CustomTagService - config *types.CiConfig - scopedVariableManager variables.ScopedVariableManager - pluginInputVariableParser PluginInputVariableParser - globalPluginService plugin.GlobalPluginService - infraProvider infraProviders.InfraProvider + Logger *zap.SugaredLogger + workflowService WorkflowService + ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + eventClient client.EventClient + eventFactory client.EventFactory + ciPipelineRepository pipelineConfig.CiPipelineRepository + pipelineStageService PipelineStageService + userService user.UserService + ciTemplateService CiTemplateService + appCrudOperationService app.AppCrudOperationService + envRepository repository1.EnvironmentRepository + appRepository appRepository.AppRepository 
+ customTagService CustomTagService + config *types.CiConfig + scopedVariableManager variables.ScopedVariableManager + pluginInputVariableParser PluginInputVariableParser + globalPluginService plugin.GlobalPluginService + infraProvider infraProviders.InfraProvider } func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService, ciPipelineMaterialRepository pipelineConfig.CiPipelineMaterialRepository, ciWorkflowRepository pipelineConfig.CiWorkflowRepository, eventClient client.EventClient, - eventFactory client.EventFactory, mergeUtil *util.MergeUtil, ciPipelineRepository pipelineConfig.CiPipelineRepository, - prePostCiScriptHistoryService history.PrePostCiScriptHistoryService, + eventFactory client.EventFactory, + ciPipelineRepository pipelineConfig.CiPipelineRepository, pipelineStageService PipelineStageService, userService user.UserService, ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository, @@ -103,26 +101,24 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService infraProvider infraProviders.InfraProvider, ) *CiServiceImpl { cis := &CiServiceImpl{ - Logger: Logger, - workflowService: workflowService, - ciPipelineMaterialRepository: ciPipelineMaterialRepository, - ciWorkflowRepository: ciWorkflowRepository, - eventClient: eventClient, - eventFactory: eventFactory, - mergeUtil: mergeUtil, - ciPipelineRepository: ciPipelineRepository, - prePostCiScriptHistoryService: prePostCiScriptHistoryService, - pipelineStageService: pipelineStageService, - userService: userService, - ciTemplateService: ciTemplateService, - appCrudOperationService: appCrudOperationService, - envRepository: envRepository, - appRepository: appRepository, - scopedVariableManager: scopedVariableManager, - customTagService: customTagService, - pluginInputVariableParser: pluginInputVariableParser, - globalPluginService: globalPluginService, - infraProvider: infraProvider, + Logger: Logger, + workflowService: workflowService, + ciPipelineMaterialRepository: ciPipelineMaterialRepository, + ciWorkflowRepository: ciWorkflowRepository, + eventClient: eventClient, + eventFactory: eventFactory, + ciPipelineRepository: ciPipelineRepository, + pipelineStageService: pipelineStageService, + userService: userService, + ciTemplateService: ciTemplateService, + appCrudOperationService: appCrudOperationService, + envRepository: envRepository, + appRepository: appRepository, + scopedVariableManager: scopedVariableManager, + customTagService: customTagService, + pluginInputVariableParser: pluginInputVariableParser, + globalPluginService: globalPluginService, + infraProvider: infraProvider, } config, err := types.GetCiConfig() if err != nil { @@ -152,7 +148,7 @@ func (impl *CiServiceImpl) TriggerCiPipeline(trigger types.Trigger) (int, error) if err != nil { return 0, err } - if trigger.PipelineType == string(pipelineConfigBean.CI_JOB) && len(ciMaterials) != 0 { + if trigger.PipelineType == string(CiPipeline.CI_JOB) && len(ciMaterials) != 0 { ciMaterials = []*pipelineConfig.CiPipelineMaterial{ciMaterials[0]} ciMaterials[0].GitMaterial = nil ciMaterials[0].GitMaterialId = 0 @@ -519,7 +515,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
var dockerfilePath string var dockerRepository string var checkoutPath string - var ciBuildConfigBean *pipelineConfigBean.CiBuildConfigBean + var ciBuildConfigBean *CiPipeline.CiBuildConfigBean dockerRegistry := &repository3.DockerArtifactStore{} if !pipeline.IsExternal && pipeline.IsDockerConfigOverridden { templateOverrideBean, err := impl.ciTemplateService.FindTemplateOverrideByCiPipelineId(pipeline.Id) @@ -626,13 +622,13 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. ciBuildConfigBean.PipelineType = trigger.PipelineType - if ciBuildConfigBean.CiBuildType == pipelineConfigBean.SELF_DOCKERFILE_BUILD_TYPE || ciBuildConfigBean.CiBuildType == pipelineConfigBean.MANAGED_DOCKERFILE_BUILD_TYPE { + if ciBuildConfigBean.CiBuildType == CiPipeline.SELF_DOCKERFILE_BUILD_TYPE || ciBuildConfigBean.CiBuildType == CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE { ciBuildConfigBean.DockerBuildConfig.BuildContext = filepath.Join(buildContextCheckoutPath, ciBuildConfigBean.DockerBuildConfig.BuildContext) dockerBuildConfig := ciBuildConfigBean.DockerBuildConfig dockerfilePath = filepath.Join(checkoutPath, dockerBuildConfig.DockerfilePath) dockerBuildConfig.DockerfilePath = dockerfilePath checkoutPath = dockerfilePath[:strings.LastIndex(dockerfilePath, "/")+1] - } else if ciBuildConfigBean.CiBuildType == pipelineConfigBean.BUILDPACK_BUILD_TYPE { + } else if ciBuildConfigBean.CiBuildType == CiPipeline.BUILDPACK_BUILD_TYPE { buildPackConfig := ciBuildConfigBean.BuildPackConfig checkoutPath = filepath.Join(checkoutPath, buildPackConfig.ProjectPath) } diff --git a/pkg/pipeline/CiTemplateService.go b/pkg/pipeline/CiTemplateService.go index ae9511a444..e408fdd5a4 100644 --- a/pkg/pipeline/CiTemplateService.go +++ b/pkg/pipeline/CiTemplateService.go @@ -5,6 +5,7 @@ import ( "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/go-pg/pg" "go.uber.org/zap" @@ -138,7 +139,7 @@ func (impl CiTemplateServiceImpl) FindTemplateOverrideByCiPipelineIds(ciPipeline return templateBeanOverrides, nil } -func (impl CiTemplateServiceImpl) extractBuildConfigBean(templateOverride *pipelineConfig.CiTemplateOverride) (*bean.CiBuildConfigBean, error) { +func (impl CiTemplateServiceImpl) extractBuildConfigBean(templateOverride *pipelineConfig.CiTemplateOverride) (*CiPipeline.CiBuildConfigBean, error) { ciBuildConfigBean, err := adapter.ConvertDbBuildConfigToBean(templateOverride.CiBuildConfig) if err != nil { impl.Logger.Errorw("error occurred while converting dbBuildConfig to bean", "ciBuildConfig", diff --git a/pkg/pipeline/CiTemplateService_test.go b/pkg/pipeline/CiTemplateService_test.go index 5632c07715..1fc06b46af 100644 --- a/pkg/pipeline/CiTemplateService_test.go +++ b/pkg/pipeline/CiTemplateService_test.go @@ -7,6 +7,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" pipelineMocks "github.com/devtron-labs/devtron/pkg/pipeline/mocks" "github.com/devtron-labs/devtron/pkg/sql" "github.com/stretchr/testify/assert" @@ -27,7 +28,7 @@ func TestCiTemplateService(t *testing.T) { templateDbEntity := &pipelineConfig.CiTemplate{ Id: 1, CiBuildConfig: &pipelineConfig.CiBuildConfig{ - 
Type: string(bean.BUILDPACK_BUILD_TYPE), + Type: string(CiPipeline.BUILDPACK_BUILD_TYPE), BuildMetadata: "{\"BuilderId\":\"" + builderId + "\"}", }, } @@ -37,7 +38,7 @@ func TestCiTemplateService(t *testing.T) { template := templateBean.CiTemplate assert.Equal(t, template.Id, templateDbEntity.Id) assert.NotNil(t, templateBean.CiBuildConfig) - assert.Equal(t, bean.BUILDPACK_BUILD_TYPE, templateBean.CiBuildConfig.CiBuildType) + assert.Equal(t, CiPipeline.BUILDPACK_BUILD_TYPE, templateBean.CiBuildConfig.CiBuildType) assert.Equal(t, builderId, templateBean.CiBuildConfig.BuildPackConfig.BuilderId) }) @@ -64,7 +65,7 @@ func TestCiTemplateService(t *testing.T) { assert.Equal(t, template.TargetPlatform, templateDbEntity.TargetPlatform) assert.Nil(t, template.CiBuildConfig) ciBuildConfig := templateBean.CiBuildConfig - assert.Equal(t, bean.SELF_DOCKERFILE_BUILD_TYPE, ciBuildConfig.CiBuildType) + assert.Equal(t, CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, ciBuildConfig.CiBuildType) assert.NotNil(t, ciBuildConfig.DockerBuildConfig) assert.Equal(t, templateDbEntity.TargetPlatform, ciBuildConfig.DockerBuildConfig.TargetPlatform) args := ciBuildConfig.DockerBuildConfig.Args @@ -84,8 +85,8 @@ func TestCiTemplateService(t *testing.T) { notPlatform := "linux/arm64" gitMaterialId := 2 ciBuildConfigId := 3 - managedDockerfileBuildType := bean.MANAGED_DOCKERFILE_BUILD_TYPE - buildConfigMetadata := &bean.DockerBuildConfig{ + managedDockerfileBuildType := CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE + buildConfigMetadata := &CiPipeline.DockerBuildConfig{ DockerfileContent: dockerfileContent, TargetPlatform: targetPlatform, } @@ -147,7 +148,7 @@ func TestCiTemplateService(t *testing.T) { assert.Equal(t, mockedTemplateOverride.Id, templateOverride.Id) assert.Equal(t, mockedTemplateOverride.GitMaterialId, templateOverride.GitMaterialId) assert.Equal(t, mockedTemplateOverride.CiPipelineId, templateOverride.CiPipelineId) - assert.Equal(t, bean.SELF_DOCKERFILE_BUILD_TYPE, ciBuildConfig.CiBuildType) + assert.Equal(t, CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, ciBuildConfig.CiBuildType) assert.Nil(t, ciBuildConfig.BuildPackConfig) assert.Empty(t, ciBuildConfig.DockerBuildConfig.DockerBuildOptions, "docker build options not supported in pipeline override") assert.Equal(t, mockedTemplateOverride.DockerfilePath, ciBuildConfig.DockerBuildConfig.DockerfilePath) @@ -165,7 +166,7 @@ func TestCiTemplateService(t *testing.T) { CiPipelineId: 2, GitMaterialId: 3, CiBuildConfig: &pipelineConfig.CiBuildConfig{ - Type: string(bean.BUILDPACK_BUILD_TYPE), + Type: string(CiPipeline.BUILDPACK_BUILD_TYPE), BuildMetadata: "{\"BuilderId\":\"" + builderId1 + "\"}", }, }, { @@ -173,7 +174,7 @@ func TestCiTemplateService(t *testing.T) { CiPipelineId: 3, GitMaterialId: 3, CiBuildConfig: &pipelineConfig.CiBuildConfig{ - Type: string(bean.BUILDPACK_BUILD_TYPE), + Type: string(CiPipeline.BUILDPACK_BUILD_TYPE), BuildMetadata: "{\"BuilderId\":\"" + builderId1 + "\"}", }, }} @@ -190,7 +191,7 @@ func TestCiTemplateService(t *testing.T) { assert.Equal(t, mockedTemplateOverride.Id, templateOverride.Id) assert.Equal(t, mockedTemplateOverride.GitMaterialId, templateOverride.GitMaterialId) assert.Equal(t, mockedTemplateOverride.CiPipelineId, templateOverride.CiPipelineId) - assert.Equal(t, bean.BUILDPACK_BUILD_TYPE, ciBuildConfig.CiBuildType) + assert.Equal(t, CiPipeline.BUILDPACK_BUILD_TYPE, ciBuildConfig.CiBuildType) assert.Nil(t, ciBuildConfig.DockerBuildConfig) assert.NotNil(t, ciBuildConfig.BuildPackConfig) assert.Equal(t, builderId1, 
ciBuildConfig.BuildPackConfig.BuilderId) @@ -205,7 +206,7 @@ func TestCiTemplateService(t *testing.T) { dockerfileContent := "FROM node:9\r\n\r\nWORKDIR /app\r\n\r\nRUN npm install -g contentful-cli\r\n\r\nCOPY package.json .\r\nRUN npm install\r\n\r\nCOPY . .\r\n\r\nUSER node\r\nEXPOSE 3000\r\n\r\nCMD [\"npm\", \"run\", \"start:dev\"]" targetPlatform := "linux/amd64" builderId := "sample-builder" - buildConfigMetadata := &bean.DockerBuildConfig{ + buildConfigMetadata := &CiPipeline.DockerBuildConfig{ DockerfileContent: dockerfileContent, TargetPlatform: targetPlatform, } @@ -216,7 +217,7 @@ func TestCiTemplateService(t *testing.T) { CiPipelineId: 2, GitMaterialId: 3, CiBuildConfig: &pipelineConfig.CiBuildConfig{ - Type: string(bean.MANAGED_DOCKERFILE_BUILD_TYPE), + Type: string(CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE), BuildMetadata: string(buildMetadata), }, }, { @@ -224,7 +225,7 @@ func TestCiTemplateService(t *testing.T) { CiPipelineId: 3, GitMaterialId: 3, CiBuildConfig: &pipelineConfig.CiBuildConfig{ - Type: string(bean.BUILDPACK_BUILD_TYPE), + Type: string(CiPipeline.BUILDPACK_BUILD_TYPE), BuildMetadata: "{\"BuilderId\":\"" + builderId + "\"}", }, }} @@ -241,13 +242,13 @@ func TestCiTemplateService(t *testing.T) { assert.Equal(t, mockedTemplateOverride.Id, templateOverride.Id) assert.Equal(t, mockedTemplateOverride.GitMaterialId, templateOverride.GitMaterialId) assert.Equal(t, mockedTemplateOverride.CiPipelineId, templateOverride.CiPipelineId) - if ciBuildConfig.CiBuildType == bean.MANAGED_DOCKERFILE_BUILD_TYPE { - assert.Equal(t, bean.MANAGED_DOCKERFILE_BUILD_TYPE, ciBuildConfig.CiBuildType) + if ciBuildConfig.CiBuildType == CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE { + assert.Equal(t, CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE, ciBuildConfig.CiBuildType) assert.Nil(t, ciBuildConfig.BuildPackConfig) assert.NotNil(t, ciBuildConfig.DockerBuildConfig) assert.Equal(t, dockerfileContent, ciBuildConfig.DockerBuildConfig.DockerfileContent) - } else if ciBuildConfig.CiBuildType == bean.BUILDPACK_BUILD_TYPE { - assert.Equal(t, bean.BUILDPACK_BUILD_TYPE, ciBuildConfig.CiBuildType) + } else if ciBuildConfig.CiBuildType == CiPipeline.BUILDPACK_BUILD_TYPE { + assert.Equal(t, CiPipeline.BUILDPACK_BUILD_TYPE, ciBuildConfig.CiBuildType) assert.Nil(t, ciBuildConfig.DockerBuildConfig) assert.NotNil(t, ciBuildConfig.BuildPackConfig) assert.Equal(t, builderId, ciBuildConfig.BuildPackConfig.BuilderId) @@ -269,11 +270,11 @@ func TestCiTemplateService(t *testing.T) { mockedCiTemplateBean.CiTemplateOverride = mockedTemplateOverride dockerBuildOptions := map[string]string{} dockerBuildOptions["volume"] = "abcd:defg" - mockedCiTemplateBean.CiBuildConfig = &bean.CiBuildConfigBean{ + mockedCiTemplateBean.CiBuildConfig = &CiPipeline.CiBuildConfigBean{ Id: mockedCiBuildConfigId, GitMaterialId: materialId, - CiBuildType: bean.SELF_DOCKERFILE_BUILD_TYPE, - DockerBuildConfig: &bean.DockerBuildConfig{DockerfilePath: "Dockerfile", TargetPlatform: "linux/amd64", DockerBuildOptions: dockerBuildOptions}, + CiBuildType: CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{DockerfilePath: "Dockerfile", TargetPlatform: "linux/amd64", DockerBuildOptions: dockerBuildOptions}, } mockedUserId := int32(4) mockedCiTemplateBean.UserId = mockedUserId @@ -285,7 +286,7 @@ func TestCiTemplateService(t *testing.T) { mockedBuildConfigService.On("UpdateOrSave", mock.AnythingOfType("int"), mock.AnythingOfType("int"), mock.AnythingOfType("*bean.CiBuildConfigBean"), mock.AnythingOfType("int32")). 
Return( - func(templateId int, overrideTemplateId int, ciBuildConfig *bean.CiBuildConfigBean, userId int32) *bean.CiBuildConfigBean { + func(templateId int, overrideTemplateId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) *CiPipeline.CiBuildConfigBean { assert.Equal(t, 0, templateId) assert.Equal(t, mockedTemplateOverrideId, overrideTemplateId) assert.Equal(t, mockedUserId, userId) @@ -314,11 +315,11 @@ func TestCiTemplateService(t *testing.T) { mockedCiTemplateBean.CiTemplate = mockedTemplate dockerBuildOptions := map[string]string{} dockerBuildOptions["volume"] = "abcd:defg" - mockedCiTemplateBean.CiBuildConfig = &bean.CiBuildConfigBean{ + mockedCiTemplateBean.CiBuildConfig = &CiPipeline.CiBuildConfigBean{ Id: mockedCiBuildConfigId, GitMaterialId: materialId, - CiBuildType: bean.SELF_DOCKERFILE_BUILD_TYPE, - DockerBuildConfig: &bean.DockerBuildConfig{DockerfilePath: "Dockerfile", TargetPlatform: "linux/amd64", DockerBuildOptions: dockerBuildOptions}, + CiBuildType: CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{DockerfilePath: "Dockerfile", TargetPlatform: "linux/amd64", DockerBuildOptions: dockerBuildOptions}, } mockedUserId := int32(4) mockedCiTemplateBean.UserId = mockedUserId @@ -330,7 +331,7 @@ func TestCiTemplateService(t *testing.T) { mockedBuildConfigService.On("UpdateOrSave", mock.AnythingOfType("int"), mock.AnythingOfType("int"), mock.AnythingOfType("*bean.CiBuildConfigBean"), mock.AnythingOfType("int32")). Return( - func(templateId int, overrideTemplateId int, ciBuildConfig *bean.CiBuildConfigBean, userId int32) *bean.CiBuildConfigBean { + func(templateId int, overrideTemplateId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) *CiPipeline.CiBuildConfigBean { assert.Equal(t, 0, overrideTemplateId) assert.Equal(t, mockedTemplateId, templateId) assert.Equal(t, mockedUserId, userId) @@ -358,18 +359,18 @@ func TestCiTemplateService(t *testing.T) { mockedCiTemplateBean.CiTemplate = mockedTemplate dockerBuildOptions := map[string]string{} dockerBuildOptions["volume"] = "abcd:defg" - mockedCiTemplateBean.CiBuildConfig = &bean.CiBuildConfigBean{ + mockedCiTemplateBean.CiBuildConfig = &CiPipeline.CiBuildConfigBean{ Id: 0, GitMaterialId: materialId, - CiBuildType: bean.SELF_DOCKERFILE_BUILD_TYPE, - DockerBuildConfig: &bean.DockerBuildConfig{DockerfilePath: "Dockerfile", TargetPlatform: "linux/amd64", DockerBuildOptions: dockerBuildOptions}, + CiBuildType: CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{DockerfilePath: "Dockerfile", TargetPlatform: "linux/amd64", DockerBuildOptions: dockerBuildOptions}, } mockedUserId := int32(4) mockedCiTemplateBean.UserId = mockedUserId mockedBuildConfigService.On("Save", mock.AnythingOfType("int"), mock.AnythingOfType("int"), mock.AnythingOfType("*bean.CiBuildConfigBean"), mock.AnythingOfType("int32")). 
Return( - func(templateId int, overrideTemplateId int, ciBuildConfig *bean.CiBuildConfigBean, userId int32) error { + func(templateId int, overrideTemplateId int, ciBuildConfig *CiPipeline.CiBuildConfigBean, userId int32) error { assert.Equal(t, 0, overrideTemplateId) assert.Equal(t, mockedTemplate.Id, templateId) assert.Equal(t, mockedUserId, userId) @@ -412,11 +413,11 @@ func TestCiTemplateService(t *testing.T) { ciBuildConfig := ciTemplateBean.CiBuildConfig - buildPackConfig := &bean.BuildPackConfig{ + buildPackConfig := &CiPipeline.BuildPackConfig{ BuilderId: "gcr.io/buildpacks/builder:v1", } //buildPackConfig.BuilderId = "heroku/buildpacks:20" - ciBuildConfig.CiBuildType = bean.BUILDPACK_BUILD_TYPE + ciBuildConfig.CiBuildType = CiPipeline.BUILDPACK_BUILD_TYPE ciBuildConfig.BuildPackConfig = buildPackConfig //args := make(map[string]string) diff --git a/pkg/pipeline/WorkflowServiceIT_test.go b/pkg/pipeline/WorkflowServiceIT_test.go index c3d189350f..4bf7efcd3d 100644 --- a/pkg/pipeline/WorkflowServiceIT_test.go +++ b/pkg/pipeline/WorkflowServiceIT_test.go @@ -18,6 +18,7 @@ import ( k8s2 "github.com/devtron-labs/devtron/pkg/k8s" "github.com/devtron-labs/devtron/pkg/k8s/informer" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/executors" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/stretchr/testify/assert" @@ -48,20 +49,20 @@ func getWorkflowServiceImpl(t *testing.T) *WorkflowServiceImpl { newEnvConfigOverrideRepository := chartConfig.NewEnvConfigOverrideRepository(dbConnection) newConfigMapRepositoryImpl := chartConfig.NewConfigMapRepositoryImpl(logger, dbConnection) newChartRepository := chartRepoRepository.NewChartRepository(dbConnection) - newCommonServiceImpl := commonService.NewCommonServiceImpl(logger, newChartRepository, newEnvConfigOverrideRepository, nil, nil, nil, nil, nil, nil, nil) + newCommonServiceImpl := commonService.NewCommonServiceImpl(logger, newChartRepository, nil, newEnvConfigOverrideRepository, nil, nil, nil, nil, nil, nil, nil) mergeUtil := util.MergeUtil{Logger: logger} - appService := app.NewAppService(nil, nil, &mergeUtil, logger, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, newConfigMapRepositoryImpl, nil, nil, nil, nil, nil, newCommonServiceImpl, nil, nil, nil, nil, nil, nil, nil, nil, "", nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + appService := app.NewAppService(nil, nil, &mergeUtil, logger, nil, nil, nil, nil, nil, newConfigMapRepositoryImpl, nil, nil, newCommonServiceImpl, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) runTimeConfig, _ := client.GetRuntimeConfig() k8sUtil := k8s.NewK8sUtil(logger, runTimeConfig) clusterRepositoryImpl := repository3.NewClusterRepositoryImpl(dbConnection, logger) v := informer.NewGlobalMapClusterNamespace() k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(logger, v, runTimeConfig, k8sUtil) clusterService := cluster.NewClusterServiceImpl(clusterRepositoryImpl, logger, k8sUtil, k8sInformerFactoryImpl, nil, nil, nil) - k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(logger, k8sUtil, clusterService) + k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(logger, k8sUtil, clusterService, nil) appStatusRepositoryImpl := appStatus.NewAppStatusRepositoryImpl(dbConnection, logger) environmentRepositoryImpl := 
repository3.NewEnvironmentRepositoryImpl(dbConnection, logger, appStatusRepositoryImpl) argoWorkflowExecutorImpl := executors.NewArgoWorkflowExecutorImpl(logger) - workflowServiceImpl, _ := NewWorkflowServiceImpl(logger, environmentRepositoryImpl, ciCdConfig, appService, globalCMCSServiceImpl, argoWorkflowExecutorImpl, k8sUtil, nil, k8sCommonServiceImpl) + workflowServiceImpl, _ := NewWorkflowServiceImpl(logger, environmentRepositoryImpl, ciCdConfig, appService, globalCMCSServiceImpl, argoWorkflowExecutorImpl, k8sUtil, nil, k8sCommonServiceImpl, nil) return workflowServiceImpl } func TestWorkflowServiceImpl_SubmitWorkflow(t *testing.T) { @@ -153,13 +154,13 @@ func TestWorkflowServiceImpl_SubmitWorkflow(t *testing.T) { RefPlugins: nil, AppName: "app", TriggerByAuthor: "admin", - CiBuildConfig: &bean2.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 1, GitMaterialId: 0, BuildContextGitMaterialId: 1, UseRootBuildContext: true, CiBuildType: "self-dockerfile-build", - DockerBuildConfig: &bean2.DockerBuildConfig{ + DockerBuildConfig: &CiPipeline.DockerBuildConfig{ DockerfilePath: "Dockerfile", DockerfileContent: "", Args: nil, @@ -289,13 +290,13 @@ func TestWorkflowServiceImpl_SubmitWorkflow(t *testing.T) { RefPlugins: nil, AppName: "app", TriggerByAuthor: "admin", - CiBuildConfig: &bean2.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 1, GitMaterialId: 0, BuildContextGitMaterialId: 1, UseRootBuildContext: true, CiBuildType: "self-dockerfile-build", - DockerBuildConfig: &bean2.DockerBuildConfig{ + DockerBuildConfig: &CiPipeline.DockerBuildConfig{ DockerfilePath: "Dockerfile", DockerfileContent: "", Args: nil, @@ -468,7 +469,7 @@ func TestWorkflowServiceImpl_SubmitWorkflow(t *testing.T) { RefPlugins: nil, AppName: "job/f1851uikJ", TriggerByAuthor: "admin", - CiBuildConfig: &bean2.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 2, GitMaterialId: 0, BuildContextGitMaterialId: 0, @@ -665,7 +666,7 @@ func TestWorkflowServiceImpl_SubmitWorkflow(t *testing.T) { RefPlugins: nil, AppName: "", TriggerByAuthor: "admin", - CiBuildConfig: &bean2.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 2, GitMaterialId: 0, BuildContextGitMaterialId: 0, diff --git a/pkg/pipeline/adapter/adapter.go b/pkg/pipeline/adapter/adapter.go index 06994490b9..b448bec5a5 100644 --- a/pkg/pipeline/adapter/adapter.go +++ b/pkg/pipeline/adapter/adapter.go @@ -4,7 +4,9 @@ import ( "encoding/json" dockerRegistryRepository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean" pipelineConfigBean "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/sql" "time" @@ -38,16 +40,16 @@ func UpdateRegistryDetailsToWrfReq(cdStageWorkflowRequest *types.WorkflowRequest cdStageWorkflowRequest.DockerRegistryId = dockerRegistry.Id } -func ConvertBuildConfigBeanToDbEntity(templateId int, overrideTemplateId int, ciBuildConfigBean *pipelineConfigBean.CiBuildConfigBean, userId int32) (*pipelineConfig.CiBuildConfig, error) { +func ConvertBuildConfigBeanToDbEntity(templateId int, overrideTemplateId int, ciBuildConfigBean *CiPipeline.CiBuildConfigBean, userId int32) (*pipelineConfig.CiBuildConfig, error) { buildMetadata := "" ciBuildType := 
ciBuildConfigBean.CiBuildType - if ciBuildType == pipelineConfigBean.BUILDPACK_BUILD_TYPE { + if ciBuildType == CiPipeline.BUILDPACK_BUILD_TYPE { buildPackConfigMetadataBytes, err := json.Marshal(ciBuildConfigBean.BuildPackConfig) if err != nil { return nil, err } buildMetadata = string(buildPackConfigMetadataBytes) - } else if ciBuildType == pipelineConfigBean.SELF_DOCKERFILE_BUILD_TYPE || ciBuildType == pipelineConfigBean.MANAGED_DOCKERFILE_BUILD_TYPE { + } else if ciBuildType == CiPipeline.SELF_DOCKERFILE_BUILD_TYPE || ciBuildType == CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE { dockerBuildMetadataBytes, err := json.Marshal(ciBuildConfigBean.DockerBuildConfig) if err != nil { return nil, err @@ -66,20 +68,20 @@ func ConvertBuildConfigBeanToDbEntity(templateId int, overrideTemplateId int, ci return ciBuildConfigEntity, nil } -func ConvertDbBuildConfigToBean(dbBuildConfig *pipelineConfig.CiBuildConfig) (*pipelineConfigBean.CiBuildConfigBean, error) { - var buildPackConfig *pipelineConfigBean.BuildPackConfig - var dockerBuildConfig *pipelineConfigBean.DockerBuildConfig +func ConvertDbBuildConfigToBean(dbBuildConfig *pipelineConfig.CiBuildConfig) (*CiPipeline.CiBuildConfigBean, error) { + var buildPackConfig *CiPipeline.BuildPackConfig + var dockerBuildConfig *CiPipeline.DockerBuildConfig var err error if dbBuildConfig == nil { return nil, nil } - ciBuildType := pipelineConfigBean.CiBuildType(dbBuildConfig.Type) - if ciBuildType == pipelineConfigBean.BUILDPACK_BUILD_TYPE { + ciBuildType := CiPipeline.CiBuildType(dbBuildConfig.Type) + if ciBuildType == CiPipeline.BUILDPACK_BUILD_TYPE { buildPackConfig, err = convertMetadataToBuildPackConfig(dbBuildConfig.BuildMetadata) if err != nil { return nil, err } - } else if ciBuildType == pipelineConfigBean.SELF_DOCKERFILE_BUILD_TYPE || ciBuildType == pipelineConfigBean.MANAGED_DOCKERFILE_BUILD_TYPE { + } else if ciBuildType == CiPipeline.SELF_DOCKERFILE_BUILD_TYPE || ciBuildType == CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE { dockerBuildConfig, err = convertMetadataToDockerBuildConfig(dbBuildConfig.BuildMetadata) if err != nil { return nil, err @@ -90,7 +92,7 @@ func ConvertDbBuildConfigToBean(dbBuildConfig *pipelineConfig.CiBuildConfig) (*p if dbBuildConfig.UseRootContext == nil || *(dbBuildConfig.UseRootContext) { useRootBuildContext = true } - ciBuildConfigBean := &pipelineConfigBean.CiBuildConfigBean{ + ciBuildConfigBean := &CiPipeline.CiBuildConfigBean{ Id: dbBuildConfig.Id, CiBuildType: ciBuildType, BuildPackConfig: buildPackConfig, @@ -100,19 +102,19 @@ func ConvertDbBuildConfigToBean(dbBuildConfig *pipelineConfig.CiBuildConfig) (*p return ciBuildConfigBean, nil } -func convertMetadataToBuildPackConfig(buildConfMetadata string) (*pipelineConfigBean.BuildPackConfig, error) { - buildPackConfig := &pipelineConfigBean.BuildPackConfig{} +func convertMetadataToBuildPackConfig(buildConfMetadata string) (*CiPipeline.BuildPackConfig, error) { + buildPackConfig := &CiPipeline.BuildPackConfig{} err := json.Unmarshal([]byte(buildConfMetadata), buildPackConfig) return buildPackConfig, err } -func convertMetadataToDockerBuildConfig(dockerBuildMetadata string) (*pipelineConfigBean.DockerBuildConfig, error) { - dockerBuildConfig := &pipelineConfigBean.DockerBuildConfig{} +func convertMetadataToDockerBuildConfig(dockerBuildMetadata string) (*CiPipeline.DockerBuildConfig, error) { + dockerBuildConfig := &CiPipeline.DockerBuildConfig{} err := json.Unmarshal([]byte(dockerBuildMetadata), dockerBuildConfig) return dockerBuildConfig, err } -func 
OverrideCiBuildConfig(dockerfilePath string, oldArgs string, ciLevelArgs string, dockerBuildOptions string, targetPlatform string, ciBuildConfigBean *pipelineConfigBean.CiBuildConfigBean) (*pipelineConfigBean.CiBuildConfigBean, error) { +func OverrideCiBuildConfig(dockerfilePath string, oldArgs string, ciLevelArgs string, dockerBuildOptions string, targetPlatform string, ciBuildConfigBean *CiPipeline.CiBuildConfigBean) (*CiPipeline.CiBuildConfigBean, error) { oldDockerArgs := map[string]string{} ciLevelDockerArgs := map[string]string{} dockerBuildOptionsMap := map[string]string{} @@ -134,9 +136,9 @@ func OverrideCiBuildConfig(dockerfilePath string, oldArgs string, ciLevelArgs st //no entry found in ci_build_config table, construct with requested data if ciBuildConfigBean == nil { dockerArgs := mergeMap(oldDockerArgs, ciLevelDockerArgs) - ciBuildConfigBean = &pipelineConfigBean.CiBuildConfigBean{ - CiBuildType: pipelineConfigBean.SELF_DOCKERFILE_BUILD_TYPE, - DockerBuildConfig: &pipelineConfigBean.DockerBuildConfig{ + ciBuildConfigBean = &CiPipeline.CiBuildConfigBean{ + CiBuildType: CiPipeline.SELF_DOCKERFILE_BUILD_TYPE, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{ DockerfilePath: dockerfilePath, Args: dockerArgs, TargetPlatform: targetPlatform, @@ -146,7 +148,7 @@ func OverrideCiBuildConfig(dockerfilePath string, oldArgs string, ciLevelArgs st //setting true as default UseRootBuildContext: true, } - } else if ciBuildConfigBean.CiBuildType == pipelineConfigBean.SELF_DOCKERFILE_BUILD_TYPE || ciBuildConfigBean.CiBuildType == pipelineConfigBean.MANAGED_DOCKERFILE_BUILD_TYPE { + } else if ciBuildConfigBean.CiBuildType == CiPipeline.SELF_DOCKERFILE_BUILD_TYPE || ciBuildConfigBean.CiBuildType == CiPipeline.MANAGED_DOCKERFILE_BUILD_TYPE { dockerBuildConfig := ciBuildConfigBean.DockerBuildConfig dockerArgs := mergeMap(dockerBuildConfig.Args, ciLevelDockerArgs) //dockerBuildConfig.DockerfilePath = dockerfilePath @@ -168,17 +170,44 @@ func mergeMap(oldDockerArgs map[string]string, ciLevelDockerArgs map[string]stri // IsLinkedCD will return if the pipelineConfig.CiPipeline is a Linked CD func IsLinkedCD(ci pipelineConfig.CiPipeline) bool { - return ci.ParentCiPipeline != 0 && ci.PipelineType == string(pipelineConfigBean.LINKED_CD) + return ci.ParentCiPipeline != 0 && ci.PipelineType == string(CiPipeline.LINKED_CD) } // IsLinkedCI will return if the pipelineConfig.CiPipeline is a Linked CI -// Currently there are inconsistent values present in PipelineType ("CI_EXTERNAL", "", "LINKED") +// Currently there are inconsistent values present in PipelineType ("CI_EXTERNAL", "LINKED") 207_ci_external.up // TODO migrate the deprecated values and maintain a consistent PipelineType func IsLinkedCI(ci pipelineConfig.CiPipeline) bool { - return ci.ParentCiPipeline != 0 && ci.PipelineType != string(pipelineConfigBean.LINKED_CD) + return ci.ParentCiPipeline != 0 && + (ci.PipelineType == string(CiPipeline.CI_EXTERNAL) || ci.PipelineType == string(CiPipeline.LINKED)) } // IsCIJob will return if the pipelineConfig.CiPipeline is a CI JOB func IsCIJob(ci pipelineConfig.CiPipeline) bool { - return ci.PipelineType == string(pipelineConfigBean.CI_JOB) + return ci.PipelineType == string(CiPipeline.CI_JOB) +} + +// GetSourceCiDownStreamResponse will take the models []bean.LinkedCIDetails and []pipelineConfig.CdWorkflowRunner (for last deployment status) and generate the []CiPipeline.SourceCiDownStreamResponse +func GetSourceCiDownStreamResponse(linkedCIDetails []bean.LinkedCIDetails, latestWfrs 
...pipelineConfig.CdWorkflowRunner) []CiPipeline.SourceCiDownStreamResponse { + response := make([]CiPipeline.SourceCiDownStreamResponse, 0) + cdWfrStatusMap := make(map[int]string) + for _, latestWfr := range latestWfrs { + cdWfrStatusMap[latestWfr.CdWorkflow.PipelineId] = latestWfr.Status + } + for _, item := range linkedCIDetails { + linkedCIDetailsRes := CiPipeline.SourceCiDownStreamResponse{ + AppName: item.AppName, + AppId: item.AppId, + } + if item.PipelineId != 0 { + linkedCIDetailsRes.EnvironmentName = item.EnvironmentName + linkedCIDetailsRes.EnvironmentId = item.EnvironmentId + linkedCIDetailsRes.TriggerMode = item.TriggerMode + linkedCIDetailsRes.DeploymentStatus = pipelineConfigBean.NotDeployed + if status, ok := cdWfrStatusMap[item.PipelineId]; ok { + linkedCIDetailsRes.DeploymentStatus = status + } + } + response = append(response, linkedCIDetailsRes) + } + return response } diff --git a/pkg/pipeline/bean/CiBuildConfig.go b/pkg/pipeline/bean/CiPipeline/CiBuildConfig.go similarity index 84% rename from pkg/pipeline/bean/CiBuildConfig.go rename to pkg/pipeline/bean/CiPipeline/CiBuildConfig.go index 3c65a1acff..6ce0d26b1b 100644 --- a/pkg/pipeline/bean/CiBuildConfig.go +++ b/pkg/pipeline/bean/CiPipeline/CiBuildConfig.go @@ -1,4 +1,4 @@ -package bean +package CiPipeline type CiBuildType string @@ -16,11 +16,13 @@ const PIPELINE_NAME_ALREADY_EXISTS_ERROR = "pipeline name already exist" type PipelineType string const ( - NORMAL PipelineType = "NORMAL" - LINKED PipelineType = "LINKED" - EXTERNAL PipelineType = "EXTERNAL" - CI_JOB PipelineType = "CI_JOB" - LINKED_CD PipelineType = "LINKED_CD" + NORMAL PipelineType = "NORMAL" + LINKED PipelineType = "LINKED" + // CI_EXTERNAL field is been sent from the dashboard in CreateLinkedCI request and directly gets saved to Database without any validations + CI_EXTERNAL PipelineType = "CI_EXTERNAL" // Deprecated Enum: TODO fix the PipelineTypes in code and database + EXTERNAL PipelineType = "EXTERNAL" + CI_JOB PipelineType = "CI_JOB" + LINKED_CD PipelineType = "LINKED_CD" ) type CiBuildConfigBean struct { diff --git a/pkg/pipeline/bean/CiPipeline/SourceCiDownStream.go b/pkg/pipeline/bean/CiPipeline/SourceCiDownStream.go new file mode 100644 index 0000000000..21b6b5dc09 --- /dev/null +++ b/pkg/pipeline/bean/CiPipeline/SourceCiDownStream.go @@ -0,0 +1,21 @@ +package CiPipeline + +import "github.com/devtron-labs/devtron/util/response/pagination" + +type SourceCiDownStreamFilters struct { + pagination.QueryParams + EnvName string `json:"envName"` +} + +type SourceCiDownStreamResponse struct { + AppName string `json:"appName"` + AppId int `json:"appId"` + EnvironmentName string `json:"environmentName"` + EnvironmentId int `json:"environmentId"` + TriggerMode string `json:"triggerMode"` + DeploymentStatus string `json:"deploymentStatus"` +} + +type SourceCiDownStreamEnv struct { + EnvNames []string `json:"envNames"` +} diff --git a/pkg/pipeline/bean/CiTemplateBean.go b/pkg/pipeline/bean/CiTemplateBean.go index d49e4b2634..509f60f237 100644 --- a/pkg/pipeline/bean/CiTemplateBean.go +++ b/pkg/pipeline/bean/CiTemplateBean.go @@ -1,10 +1,14 @@ package bean -import "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" +import ( + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" +) +// todo move to proper place type CiTemplateBean struct { CiTemplate *pipelineConfig.CiTemplate CiTemplateOverride *pipelineConfig.CiTemplateOverride - CiBuildConfig 
*CiBuildConfigBean + CiBuildConfig *CiPipeline.CiBuildConfigBean UserId int32 } diff --git a/pkg/pipeline/bean/pipelineStage.go b/pkg/pipeline/bean/pipelineStage.go index f2a1ae2d66..e317584d99 100644 --- a/pkg/pipeline/bean/pipelineStage.go +++ b/pkg/pipeline/bean/pipelineStage.go @@ -95,4 +95,10 @@ type PortMap struct { const ( VULNERABILITY_SCANNING_PLUGIN string = "Vulnerability Scanning" + + NotTriggered string = "Not Triggered" + NotDeployed = "Not Deployed" + WorkflowTypeDeploy = "DEPLOY" + WorkflowTypePre = "PRE" + WorkflowTypePost = "POST" ) diff --git a/pkg/pipeline/history/CiPipelineHistoryService_test.go b/pkg/pipeline/history/CiPipelineHistoryService_test.go index 7f533750b1..e32ffba91f 100644 --- a/pkg/pipeline/history/CiPipelineHistoryService_test.go +++ b/pkg/pipeline/history/CiPipelineHistoryService_test.go @@ -6,6 +6,7 @@ import ( mocks2 "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/mocks" "github.com/devtron-labs/devtron/internal/util" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/history/repository/mocks" "github.com/devtron-labs/devtron/pkg/sql" @@ -75,11 +76,11 @@ func TestCiPipelineHistoryService(t *testing.T) { DockerRegistry: nil, CiBuildConfig: nil, }, - CiBuildConfig: &bean2.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 20, GitMaterialId: 22, CiBuildType: "self-dockerfile-build", - DockerBuildConfig: &bean2.DockerBuildConfig{DockerfileContent: ""}, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{DockerfileContent: ""}, BuildPackConfig: nil, }, UserId: 0, diff --git a/pkg/pipeline/history/ciTemplateHistoryService_test.go b/pkg/pipeline/history/ciTemplateHistoryService_test.go index 80e4a23ca9..7eb647a46c 100644 --- a/pkg/pipeline/history/ciTemplateHistoryService_test.go +++ b/pkg/pipeline/history/ciTemplateHistoryService_test.go @@ -4,6 +4,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/util" bean2 "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/history/repository/mocks" "github.com/devtron-labs/devtron/pkg/sql" @@ -71,11 +72,11 @@ func TestCiTemplateHistoryService(t *testing.T) { CiBuildConfig: nil, }, CiTemplateOverride: nil, - CiBuildConfig: &bean2.CiBuildConfigBean{ + CiBuildConfig: &CiPipeline.CiBuildConfigBean{ Id: 20, GitMaterialId: 22, CiBuildType: "self-dockerfile-build", - DockerBuildConfig: &bean2.DockerBuildConfig{DockerfileContent: ""}, + DockerBuildConfig: &CiPipeline.DockerBuildConfig{DockerfileContent: ""}, BuildPackConfig: nil, }, UserId: 0, diff --git a/pkg/pipeline/mocks/CiBuildConfigService.go b/pkg/pipeline/mocks/CiBuildConfigService.go index 4064426ce4..dfc4f01721 100644 --- a/pkg/pipeline/mocks/CiBuildConfigService.go +++ b/pkg/pipeline/mocks/CiBuildConfigService.go @@ -3,7 +3,7 @@ package mocks import ( - bean "github.com/devtron-labs/devtron/pkg/pipeline/bean" + bean "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" mock "github.com/stretchr/testify/mock" ) diff --git a/pkg/pipeline/types/Workflow.go b/pkg/pipeline/types/Workflow.go index 3d8713a056..40f036ebd7 100644 --- a/pkg/pipeline/types/Workflow.go +++ 
b/pkg/pipeline/types/Workflow.go @@ -30,6 +30,7 @@ import ( "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/infraConfig" "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/plugin" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "k8s.io/api/core/v1" @@ -89,7 +90,7 @@ type WorkflowRequest struct { RefPlugins []*bean.RefPluginObject `json:"refPlugins"` AppName string `json:"appName"` TriggerByAuthor string `json:"triggerByAuthor"` - CiBuildConfig *bean.CiBuildConfigBean `json:"ciBuildConfig"` + CiBuildConfig *CiPipeline.CiBuildConfigBean `json:"ciBuildConfig"` CiBuildDockerMtuValue int `json:"ciBuildDockerMtuValue"` IgnoreDockerCachePush bool `json:"ignoreDockerCachePush"` IgnoreDockerCachePull bool `json:"ignoreDockerCachePull"` diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index f59a142003..5dc3fd47bb 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -34,6 +34,7 @@ import ( bean5 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" bean7 "github.com/devtron-labs/devtron/pkg/eventProcessor/bean" "github.com/devtron-labs/devtron/pkg/pipeline" + "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" "github.com/devtron-labs/devtron/pkg/pipeline/executors" repository2 "github.com/devtron-labs/devtron/pkg/plugin/repository" "github.com/devtron-labs/devtron/pkg/sql" @@ -768,7 +769,7 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext bean5.T var pluginArtifacts []*repository.CiArtifact for registry, artifacts := range request.PluginRegistryArtifactDetails { for _, image := range artifacts { - if pipeline.PipelineType == string(bean3.CI_JOB) && image == "" { + if pipeline.PipelineType == string(CiPipeline.CI_JOB) && image == "" { continue } pluginArtifact := &repository.CiArtifact{ diff --git a/specs/ci-pipeline/ciPipelineDownstream/downstream-linked-ci-view-spec.yaml b/specs/ci-pipeline/ciPipelineDownstream/downstream-linked-ci-view-spec.yaml new file mode 100644 index 0000000000..861648d794 --- /dev/null +++ b/specs/ci-pipeline/ciPipelineDownstream/downstream-linked-ci-view-spec.yaml @@ -0,0 +1,185 @@ +openapi: "3.0.0" +info: + version: 1.0.0 + title: Modularisation v1 APIs +paths: + /orchestrator/app/ci-pipeline/{ciPipelineId}/linked-ci/downstream/env: + get: + description: Get down stream environment names for filter list + parameters: + - name: ciPipelineId + description: source ci-pipeline id + in: path + required: true + schema: + type: integer + responses: + "200": + description: Successfully fetched the down stream environments for the source CI pipeline. + content: + application/json: + schema: + $ref: "#/components/schemas/LinkedCIPipelinesFiltersResponse" + "500": + description: will get this response if any failure occurs at server side. + "403": + description: will get this if user doesn't have view access to the app that contains the requested source ci pipeline. + + /orchestrator/app/ci-pipeline/{ciPipelineId}/linked-ci/downstream/cd: + get: + description: Get the down stream deployment details for the source CI pipeline. 
+ parameters: + - name: ciPipelineId + description: source ci-pipeline id + in: path + required: true + schema: + type: integer + - name: order + description: app name sort order + in: query + required: false + schema: + type: string + default: "ASC" + enum: + - "ASC" + - "DESC" + - name: offset + description: page offset value + in: query + required: false + schema: + type: number + default: 0 + - name: size + description: page size value + in: query + required: false + schema: + type: number + default: 20 + - name: envName + description: environment name filter + in: query + required: false + schema: + type: string + default: All environments + - name: searchKey + description: application name filter + in: query + required: false + schema: + type: string + responses: + "200": + description: Successfully fetched the down stream deployment details for the source CI pipeline. + content: + application/json: + schema: + $ref: "#/components/schemas/LinkedCIPipelinesViewResponse" + "500": + description: will get this response if any failure occurs at server side. + "403": + description: will get this if user doesn't have view access to the app that contains the requested source ci pipeline. + +# Components +components: + schemas: + LinkedCIPipelinesFiltersResponse: + type: object + properties: + code: + type: number + description: status code + example: 200 + status: + type: string + description: API status + example: "OK" + result: + $ref: "#/components/schemas/LinkedCIPipelinesFilters" + + LinkedCIPipelinesFilters: + type: object + properties: + envNames: + type: array + description: Down stream environment names + example: + - "staging" + - "production" + - "qa" + nullable: false + uniqueItems: true + items: + type: string + + LinkedCIPipelinesViewResponse: + type: object + properties: + code: + type: number + description: status code + example: 200 + status: + type: string + description: API status + example: "OK" + result: + $ref: "#/components/schemas/LinkedCIPipelinesInfo" + + LinkedCIPipelinesInfo: + type: object + properties: + totalCount: + type: number + description: Total results count + example: 1122 + offset: + type: number + description: Current page number + example: 2 + size: + type: number + description: Current page size + example: 20 + data: + type: array + nullable: false + description: Down stream deployments data + items: + $ref: "#/components/schemas/LinkedCIPipelineData" + + LinkedCIPipelineData: + type: object + required: + - appName + - appId + properties: + appName: + type: string + description: application name + example: "devtron-app" + appId: + type: number + description: application id + example: 1 + environmentName: + type: string + description: deploys to environment name + example: "staging" + environmentId: + type: number + description: deploys to environment id + example: 1 + triggerMode: + type: string + description: pipeline trigger type + example: "AUTOMATIC" + deploymentStatus: + type: string + default: "Not Deployed" + description: last deployment status of the pipeline + example: "Succeeded" \ No newline at end of file diff --git a/util/argo/ArgoUserService.go b/util/argo/ArgoUserService.go index bcceceecd2..0999d0ae13 100644 --- a/util/argo/ArgoUserService.go +++ b/util/argo/ArgoUserService.go @@ -213,11 +213,9 @@ func (impl *ArgoUserServiceImpl) GetLatestDevtronArgoCdUserToken() (string, erro username := secretData[DEVTRON_ARGOCD_USERNAME_KEY] password := secretData[DEVTRON_ARGOCD_USER_PASSWORD_KEY] latestTokenNo := 1 - isTokenAvailable := true var 
token string for key, value := range secretData { if strings.HasPrefix(key, DEVTRON_ARGOCD_TOKEN_KEY) { - isTokenAvailable = true keySplits := strings.Split(key, "_") keyLen := len(keySplits) tokenNo, err := strconv.Atoi(keySplits[keyLen-1]) @@ -232,7 +230,7 @@ func (impl *ArgoUserServiceImpl) GetLatestDevtronArgoCdUserToken() (string, erro } } - if !isTokenAvailable || len(token) == 0 { + if len(token) == 0 { newTokenNo := latestTokenNo + 1 token, err = impl.createNewArgoCdTokenForDevtron(string(username), string(password), newTokenNo, k8sClient) if err != nil { diff --git a/util/response/pagination/GenericPaginatedResponse.go b/util/response/pagination/GenericPaginatedResponse.go new file mode 100644 index 0000000000..ec61a41be7 --- /dev/null +++ b/util/response/pagination/GenericPaginatedResponse.go @@ -0,0 +1,61 @@ +package pagination + +type SortOrder string +type SortBy string + +const ( + Asc SortOrder = "ASC" + Desc SortOrder = "DESC" +) + +const ( + AppName SortBy = "app_name" +) + +type QueryParams struct { + SortOrder SortOrder `json:"sortOrder"` + SortBy SortBy `json:"sortBy"` + Offset int `json:"offset"` + Size int `json:"size"` + SearchKey string `json:"searchKey"` +} + +type RepositoryRequest struct { + Order SortOrder + SortBy SortBy + Limit, Offset int +} + +type PaginatedResponse[T any] struct { + TotalCount int `json:"totalCount"` // Total results count + Offset int `json:"offset"` // Current page number + Size int `json:"size"` // Current page size + Data []T `json:"data"` +} + +// NewPaginatedResponse will initialise the PaginatedResponse; making sure that PaginatedResponse.Data will not be Null +func NewPaginatedResponse[T any]() PaginatedResponse[T] { + return PaginatedResponse[T]{ + Data: []T{}, + } +} + +// PushData will append item to the PaginatedResponse.Data +func (m *PaginatedResponse[T]) PushData(item ...T) { + m.Data = append(m.Data, item...) +} + +// UpdateTotalCount will update the TotalCount in PaginatedResponse +func (m *PaginatedResponse[_]) UpdateTotalCount(totalCount int) { // not using the type param in this method + m.TotalCount = totalCount +} + +// UpdateOffset will update the Offset in PaginatedResponse +func (m *PaginatedResponse[_]) UpdateOffset(offset int) { // not using the type param in this method + m.Offset = offset +} + +// UpdateSize will update the Size in PaginatedResponse +func (m *PaginatedResponse[_]) UpdateSize(size int) { // not using the type param in this method + m.Size = size +} diff --git a/wire_gen.go b/wire_gen.go index 663248363e..9b5f563127 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. 
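
The generic paginated response helper added above in util/response/pagination/GenericPaginatedResponse.go is shaped to match the LinkedCIPipelinesInfo schema in the new downstream-linked-ci spec (totalCount, offset, size, data). A minimal usage sketch, assuming the devtron module is on the import path and reusing the SourceCiDownStreamResponse bean introduced in this patch; the literal values are illustrative only:

package main

import (
	"fmt"

	"github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline"
	"github.com/devtron-labs/devtron/util/response/pagination"
)

func main() {
	// NewPaginatedResponse guarantees Data starts as an empty slice, so the JSON
	// "data" field serialises as [] rather than null when nothing matches the filter.
	resp := pagination.NewPaginatedResponse[CiPipeline.SourceCiDownStreamResponse]()
	resp.PushData(CiPipeline.SourceCiDownStreamResponse{
		AppName:          "devtron-app",
		AppId:            1,
		EnvironmentName:  "staging",
		EnvironmentId:    1,
		TriggerMode:      "AUTOMATIC",
		DeploymentStatus: "Succeeded",
	})
	// TotalCount is the full result-set size; Offset and Size describe the current page.
	resp.UpdateTotalCount(1)
	resp.UpdateOffset(0)
	resp.UpdateSize(20)
	fmt.Printf("%+v\n", resp)
}
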
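The downstream-response mapping added earlier in this patch (GetSourceCiDownStreamResponse in pkg/pipeline/adapter/adapter.go) reduces to a status lookup keyed by CD pipeline id, with "Not Deployed" as the fallback when a pipeline exists but has no recorded runner. A self-contained sketch of that lookup, using simplified stand-in types rather than the real bean.LinkedCIDetails and pipelineConfig.CdWorkflowRunner models:

package main

import "fmt"

// linkedCIDetail and cdRunner are simplified stand-ins for the repository models
// consumed by GetSourceCiDownStreamResponse.
type linkedCIDetail struct {
	AppName    string
	AppId      int
	PipelineId int // 0 means no CD pipeline is attached to this linked CI yet
}

type cdRunner struct {
	PipelineId int
	Status     string
}

func deploymentStatuses(details []linkedCIDetail, latestRunners []cdRunner) map[string]string {
	// index the latest runner status by CD pipeline id, as the adapter does
	statusByPipeline := make(map[int]string)
	for _, r := range latestRunners {
		statusByPipeline[r.PipelineId] = r.Status
	}
	out := make(map[string]string, len(details))
	for _, d := range details {
		if d.PipelineId == 0 {
			// linked CI with no CD pipeline: no deployment status to report
			out[d.AppName] = ""
			continue
		}
		status := "Not Deployed" // same fallback as the NotDeployed constant in pipelineStage.go
		if s, ok := statusByPipeline[d.PipelineId]; ok {
			status = s
		}
		out[d.AppName] = status
	}
	return out
}

func main() {
	details := []linkedCIDetail{
		{AppName: "devtron-app", AppId: 1, PipelineId: 7},
		{AppName: "orphan-app", AppId: 2, PipelineId: 0},
	}
	runners := []cdRunner{{PipelineId: 7, Status: "Succeeded"}}
	fmt.Println(deploymentStatuses(details, runners))
}
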
-//go:generate go run -mod=mod github.com/google/wire/cmd/wire +//go:generate go run github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject @@ -84,7 +84,7 @@ import ( "github.com/devtron-labs/devtron/internal/sql/repository/chartConfig" repository5 "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/helper" - repository12 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" + repository11 "github.com/devtron-labs/devtron/internal/sql/repository/imageTagging" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/internal/sql/repository/resourceGroup" "github.com/devtron-labs/devtron/internal/sql/repository/security" @@ -168,12 +168,12 @@ import ( "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/executors" "github.com/devtron-labs/devtron/pkg/pipeline/history" - repository9 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" + repository12 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/infraProviders" - repository10 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + repository9 "github.com/devtron-labs/devtron/pkg/pipeline/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/plugin" - repository11 "github.com/devtron-labs/devtron/pkg/plugin/repository" + repository10 "github.com/devtron-labs/devtron/pkg/plugin/repository" resourceGroup2 "github.com/devtron-labs/devtron/pkg/resourceGroup" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" security2 "github.com/devtron-labs/devtron/pkg/security" @@ -466,10 +466,8 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - prePostCiScriptHistoryRepositoryImpl := repository9.NewPrePostCiScriptHistoryRepositoryImpl(sugaredLogger, db) - prePostCiScriptHistoryServiceImpl := history.NewPrePostCiScriptHistoryServiceImpl(sugaredLogger, prePostCiScriptHistoryRepositoryImpl) - pipelineStageRepositoryImpl := repository10.NewPipelineStageRepository(sugaredLogger, db) - globalPluginRepositoryImpl := repository11.NewGlobalPluginRepository(sugaredLogger, db) + pipelineStageRepositoryImpl := repository9.NewPipelineStageRepository(sugaredLogger, db) + globalPluginRepositoryImpl := repository10.NewGlobalPluginRepository(sugaredLogger, db) scopedVariableManagerImpl, err := variables.NewScopedVariableManagerImpl(sugaredLogger, scopedVariableServiceImpl, variableEntityMappingServiceImpl, variableSnapshotHistoryServiceImpl, variableTemplateParserImpl) if err != nil { return nil, err @@ -487,7 +485,7 @@ func InitializeApp() (*App, error) { customTagServiceImpl := pipeline.NewCustomTagService(sugaredLogger, imageTagRepositoryImpl) pluginInputVariableParserImpl := pipeline.NewPluginInputVariableParserImpl(sugaredLogger, dockerRegistryConfigImpl, customTagServiceImpl) globalPluginServiceImpl := plugin.NewGlobalPluginService(sugaredLogger, globalPluginRepositoryImpl, pipelineStageRepositoryImpl) - ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, mergeUtil, ciPipelineRepositoryImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, 
scopedVariableManagerImpl, customTagServiceImpl, pluginInputVariableParserImpl, globalPluginServiceImpl, infraProviderImpl) + ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, customTagServiceImpl, pluginInputVariableParserImpl, globalPluginServiceImpl, infraProviderImpl) clientConfig, err := gitSensor.GetConfig() if err != nil { return nil, err @@ -505,7 +503,7 @@ func InitializeApp() (*App, error) { resourceGroupRepositoryImpl := resourceGroup.NewResourceGroupRepositoryImpl(db) resourceGroupMappingRepositoryImpl := resourceGroup.NewResourceGroupMappingRepositoryImpl(db) resourceGroupServiceImpl := resourceGroup2.NewResourceGroupServiceImpl(sugaredLogger, resourceGroupRepositoryImpl, resourceGroupMappingRepositoryImpl, enforcerUtilImpl, devtronResourceSearchableKeyServiceImpl, appStatusRepositoryImpl) - imageTaggingRepositoryImpl := repository12.NewImageTaggingRepositoryImpl(db) + imageTaggingRepositoryImpl := repository11.NewImageTaggingRepositoryImpl(db) imageTaggingServiceImpl := pipeline.NewImageTaggingServiceImpl(imageTaggingRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, sugaredLogger) blobStorageConfigServiceImpl := pipeline.NewBlobStorageConfigServiceImpl(sugaredLogger, k8sServiceImpl, ciCdConfig) appWorkflowRepositoryImpl := appWorkflow.NewAppWorkflowRepositoryImpl(sugaredLogger, db) @@ -513,34 +511,34 @@ func InitializeApp() (*App, error) { gitWebhookRepositoryImpl := repository2.NewGitWebhookRepositoryImpl(db) gitWebhookServiceImpl := git2.NewGitWebhookServiceImpl(sugaredLogger, ciHandlerImpl, gitWebhookRepositoryImpl) gitWebhookRestHandlerImpl := restHandler.NewGitWebhookRestHandlerImpl(sugaredLogger, gitWebhookServiceImpl) - prePostCdScriptHistoryRepositoryImpl := repository9.NewPrePostCdScriptHistoryRepositoryImpl(sugaredLogger, db) - configMapHistoryRepositoryImpl := repository9.NewConfigMapHistoryRepositoryImpl(sugaredLogger, db) + prePostCdScriptHistoryRepositoryImpl := repository12.NewPrePostCdScriptHistoryRepositoryImpl(sugaredLogger, db) + configMapHistoryRepositoryImpl := repository12.NewConfigMapHistoryRepositoryImpl(sugaredLogger, db) configMapHistoryServiceImpl := history.NewConfigMapHistoryServiceImpl(sugaredLogger, configMapHistoryRepositoryImpl, pipelineRepositoryImpl, configMapRepositoryImpl, userServiceImpl, scopedVariableCMCSManagerImpl) prePostCdScriptHistoryServiceImpl := history.NewPrePostCdScriptHistoryServiceImpl(sugaredLogger, prePostCdScriptHistoryRepositoryImpl, configMapRepositoryImpl, configMapHistoryServiceImpl) - gitMaterialHistoryRepositoryImpl := repository9.NewGitMaterialHistoryRepositoyImpl(db) + gitMaterialHistoryRepositoryImpl := repository12.NewGitMaterialHistoryRepositoyImpl(db) gitMaterialHistoryServiceImpl := history.NewGitMaterialHistoryServiceImpl(gitMaterialHistoryRepositoryImpl, sugaredLogger) - ciPipelineHistoryRepositoryImpl := repository9.NewCiPipelineHistoryRepositoryImpl(db, sugaredLogger) + ciPipelineHistoryRepositoryImpl := repository12.NewCiPipelineHistoryRepositoryImpl(db, sugaredLogger) ciPipelineHistoryServiceImpl := history.NewCiPipelineHistoryServiceImpl(ciPipelineHistoryRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl) pipelineConfigRepositoryImpl 
:= chartConfig.NewPipelineConfigRepository(db) configMapServiceImpl := pipeline.NewConfigMapServiceImpl(chartRepositoryImpl, sugaredLogger, chartRepoRepositoryImpl, utilMergeUtil, pipelineConfigRepositoryImpl, configMapRepositoryImpl, envConfigOverrideRepositoryImpl, commonServiceImpl, appRepositoryImpl, configMapHistoryServiceImpl, environmentRepositoryImpl, scopedVariableCMCSManagerImpl) - deploymentTemplateHistoryRepositoryImpl := repository9.NewDeploymentTemplateHistoryRepositoryImpl(sugaredLogger, db) + deploymentTemplateHistoryRepositoryImpl := repository12.NewDeploymentTemplateHistoryRepositoryImpl(sugaredLogger, db) appLevelMetricsRepositoryImpl := repository13.NewAppLevelMetricsRepositoryImpl(db, sugaredLogger) envLevelAppMetricsRepositoryImpl := repository13.NewEnvLevelAppMetricsRepositoryImpl(db, sugaredLogger) deployedAppMetricsServiceImpl := deployedAppMetrics.NewDeployedAppMetricsServiceImpl(sugaredLogger, appLevelMetricsRepositoryImpl, envLevelAppMetricsRepositoryImpl, chartRefServiceImpl) deploymentTemplateHistoryServiceImpl := history.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl) chartServiceImpl := chart.NewChartServiceImpl(chartRepositoryImpl, sugaredLogger, chartTemplateServiceImpl, chartRepoRepositoryImpl, appRepositoryImpl, utilMergeUtil, envConfigOverrideRepositoryImpl, pipelineConfigRepositoryImpl, environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, gitOpsConfigReadServiceImpl) - ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appListingRepositoryImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, pipelineStageServiceImpl, ciTemplateOverrideRepositoryImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, ciArtifactRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl, chartServiceImpl) + ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, pipelineStageServiceImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, ciArtifactRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl, chartServiceImpl) ecrConfig, err := pipeline.GetEcrConfig() if err != nil { return nil, err } - ciTemplateHistoryRepositoryImpl := repository9.NewCiTemplateHistoryRepositoryImpl(db, sugaredLogger) + ciTemplateHistoryRepositoryImpl := repository12.NewCiTemplateHistoryRepositoryImpl(db, sugaredLogger) ciTemplateHistoryServiceImpl := history.NewCiTemplateHistoryServiceImpl(ciTemplateHistoryRepositoryImpl, 
sugaredLogger) buildPipelineSwitchServiceImpl := pipeline.NewBuildPipelineSwitchServiceImpl(sugaredLogger, ciPipelineRepositoryImpl, ciCdPipelineOrchestratorImpl, pipelineRepositoryImpl, ciWorkflowRepositoryImpl, appWorkflowRepositoryImpl, ciPipelineHistoryServiceImpl, ciTemplateOverrideRepositoryImpl, ciPipelineMaterialRepositoryImpl) ciPipelineConfigServiceImpl := pipeline.NewCiPipelineConfigServiceImpl(sugaredLogger, ciCdPipelineOrchestratorImpl, dockerArtifactStoreRepositoryImpl, materialRepositoryImpl, appRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ecrConfig, appWorkflowRepositoryImpl, ciCdConfig, attributesServiceImpl, pipelineStageServiceImpl, ciPipelineMaterialRepositoryImpl, ciTemplateServiceImpl, ciTemplateOverrideRepositoryImpl, ciTemplateHistoryServiceImpl, enforcerUtilImpl, ciWorkflowRepositoryImpl, resourceGroupServiceImpl, customTagServiceImpl, cdWorkflowRepositoryImpl, buildPipelineSwitchServiceImpl) ciMaterialConfigServiceImpl := pipeline.NewCiMaterialConfigServiceImpl(sugaredLogger, materialRepositoryImpl, ciTemplateServiceImpl, ciCdPipelineOrchestratorImpl, ciPipelineRepositoryImpl, gitMaterialHistoryServiceImpl, pipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl) deploymentGroupRepositoryImpl := repository2.NewDeploymentGroupRepositoryImpl(sugaredLogger, db) - pipelineStrategyHistoryRepositoryImpl := repository9.NewPipelineStrategyHistoryRepositoryImpl(sugaredLogger, db) + pipelineStrategyHistoryRepositoryImpl := repository12.NewPipelineStrategyHistoryRepositoryImpl(sugaredLogger, db) pipelineStrategyHistoryServiceImpl := history.NewPipelineStrategyHistoryServiceImpl(sugaredLogger, pipelineStrategyHistoryRepositoryImpl, userServiceImpl) propertiesConfigServiceImpl := pipeline.NewPropertiesConfigServiceImpl(sugaredLogger, envConfigOverrideRepositoryImpl, chartRepositoryImpl, environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl) deploymentServiceTypeConfig, err := pipeline.GetDeploymentServiceTypeConfig() @@ -589,10 +587,10 @@ func InitializeApp() (*App, error) { imageScanHistoryRepositoryImpl := security.NewImageScanHistoryRepositoryImpl(db, sugaredLogger) cveStoreRepositoryImpl := security.NewCveStoreRepositoryImpl(db, sugaredLogger) policyServiceImpl := security2.NewPolicyServiceImpl(environmentServiceImpl, sugaredLogger, appRepositoryImpl, pipelineOverrideRepositoryImpl, cvePolicyRepositoryImpl, clusterServiceImplExtended, pipelineRepositoryImpl, imageScanResultRepositoryImpl, imageScanDeployInfoRepositoryImpl, imageScanObjectMetaRepositoryImpl, httpClient, ciArtifactRepositoryImpl, ciCdConfig, imageScanHistoryRepositoryImpl, cveStoreRepositoryImpl, ciTemplateRepositoryImpl) - pipelineConfigRestHandlerImpl := configure.NewPipelineRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, deploymentTemplateValidationServiceImpl, chartServiceImpl, devtronAppGitOpConfigServiceImpl, propertiesConfigServiceImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, dockerRegistryConfigImpl, cdHandlerImpl, appCloneServiceImpl, generateManifestDeploymentTemplateServiceImpl, appWorkflowServiceImpl, materialRepositoryImpl, policyServiceImpl, imageScanResultRepositoryImpl, gitProviderRepositoryImpl, argoUserServiceImpl, ciPipelineMaterialRepositoryImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl) + pipelineConfigRestHandlerImpl := 
configure.NewPipelineRestHandlerImpl(pipelineBuilderImpl, sugaredLogger, deploymentTemplateValidationServiceImpl, chartServiceImpl, devtronAppGitOpConfigServiceImpl, propertiesConfigServiceImpl, userServiceImpl, teamServiceImpl, enforcerImpl, ciHandlerImpl, validate, clientImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, dockerRegistryConfigImpl, cdHandlerImpl, appCloneServiceImpl, generateManifestDeploymentTemplateServiceImpl, appWorkflowServiceImpl, materialRepositoryImpl, policyServiceImpl, imageScanResultRepositoryImpl, gitProviderRepositoryImpl, argoUserServiceImpl, ciPipelineMaterialRepositoryImpl, imageTaggingServiceImpl, ciArtifactRepositoryImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, ciCdPipelineOrchestratorImpl) gitOpsManifestPushServiceImpl := app2.NewGitOpsManifestPushServiceImpl(sugaredLogger, pipelineStatusTimelineServiceImpl, pipelineStatusTimelineRepositoryImpl, acdConfig, chartRefServiceImpl, gitOpsConfigReadServiceImpl, gitOperationServiceImpl, argoClientWrapperServiceImpl) argoK8sClientImpl := argocdServer.NewArgoK8sClientImpl(sugaredLogger, k8sServiceImpl) - manifestPushConfigRepositoryImpl := repository10.NewManifestPushConfigRepository(sugaredLogger, db) + manifestPushConfigRepositoryImpl := repository9.NewManifestPushConfigRepository(sugaredLogger, db) triggerServiceImpl, err := devtronApps.NewTriggerServiceImpl(sugaredLogger, cdWorkflowCommonServiceImpl, gitOpsManifestPushServiceImpl, gitOpsConfigReadServiceImpl, argoK8sClientImpl, acdConfig, argoClientWrapperServiceImpl, pipelineStatusTimelineServiceImpl, chartTemplateServiceImpl, chartServiceImpl, workflowEventPublishServiceImpl, manifestCreationServiceImpl, deployedConfigurationHistoryServiceImpl, argoUserServiceImpl, pipelineStageServiceImpl, globalPluginServiceImpl, customTagServiceImpl, pluginInputVariableParserImpl, prePostCdScriptHistoryServiceImpl, scopedVariableCMCSManagerImpl, workflowServiceImpl, imageDigestPolicyServiceImpl, userServiceImpl, clientImpl, helmAppServiceImpl, enforcerUtilImpl, helmAppClientImpl, eventSimpleFactoryImpl, eventRESTClientImpl, globalEnvVariables, appRepositoryImpl, imageScanResultRepositoryImpl, cvePolicyRepositoryImpl, ciPipelineMaterialRepositoryImpl, imageScanHistoryRepositoryImpl, imageScanDeployInfoRepositoryImpl, pipelineRepositoryImpl, pipelineOverrideRepositoryImpl, manifestPushConfigRepositoryImpl, chartRepositoryImpl, environmentRepositoryImpl, cdWorkflowRepositoryImpl, ciWorkflowRepositoryImpl, ciArtifactRepositoryImpl, ciTemplateServiceImpl, materialRepositoryImpl, appLabelRepositoryImpl, ciPipelineRepositoryImpl, appWorkflowRepositoryImpl, dockerArtifactStoreRepositoryImpl) if err != nil { return nil, err @@ -783,6 +781,8 @@ func InitializeApp() (*App, error) { pipelineTriggerRouterImpl := trigger2.NewPipelineTriggerRouter(pipelineTriggerRestHandlerImpl, sseSSE) webhookDataRestHandlerImpl := webhook.NewWebhookDataRestHandlerImpl(sugaredLogger, userServiceImpl, ciPipelineMaterialRepositoryImpl, enforcerUtilImpl, enforcerImpl, clientImpl, webhookEventDataConfigImpl) pipelineConfigRouterImpl := configure2.NewPipelineRouterImpl(pipelineConfigRestHandlerImpl, webhookDataRestHandlerImpl) + prePostCiScriptHistoryRepositoryImpl := repository12.NewPrePostCiScriptHistoryRepositoryImpl(sugaredLogger, db) + prePostCiScriptHistoryServiceImpl := history.NewPrePostCiScriptHistoryServiceImpl(sugaredLogger, prePostCiScriptHistoryRepositoryImpl) pipelineHistoryRestHandlerImpl := history2.NewPipelineHistoryRestHandlerImpl(sugaredLogger, 
userServiceImpl, enforcerImpl, pipelineStrategyHistoryServiceImpl, deploymentTemplateHistoryServiceImpl, configMapHistoryServiceImpl, prePostCiScriptHistoryServiceImpl, prePostCdScriptHistoryServiceImpl, enforcerUtilImpl, deployedConfigurationHistoryServiceImpl) pipelineHistoryRouterImpl := history3.NewPipelineHistoryRouterImpl(pipelineHistoryRestHandlerImpl) pipelineStatusTimelineRestHandlerImpl := status3.NewPipelineStatusTimelineRestHandlerImpl(sugaredLogger, userServiceImpl, pipelineStatusTimelineServiceImpl, enforcerUtilImpl, enforcerImpl, cdApplicationStatusUpdateHandlerImpl, pipelineBuilderImpl) From ce94374f590fcd2963ff44167c4e74f4c6387904 Mon Sep 17 00:00:00 2001 From: Yashasvi17 <155513200+YashasviDevtron@users.noreply.github.com> Date: Fri, 22 Mar 2024 13:53:57 +0530 Subject: [PATCH 13/29] feat:Github Pull Request Closer (#4833) * PR plugin script * Update 230_Github_Pull_Request_Closer.up.sql * Update 230_Github_Pull_Request_Closer.up.sql * Update 230_Github_Pull_Request_Closer.up.sql * Rename 230_Github_Pull_Request_Closer.down.sql to 231_Github_Pull_Request_Closer.down.sql * Rename 230_Github_Pull_Request_Closer.up.sql to 231_Github_Pull_Request_Closer.up.sql * Rename GithubReleasePR.png to GithubPullRequest-Plugin-logo.png * Update 231_Github_Pull_Request_Closer.up.sql --------- Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> --- assets/GithubPullRequest-Plugin-logo.png | Bin 0 -> 36897 bytes .../231_Github_Pull_Request_Closer.down.sql | 7 ++ .../sql/231_Github_Pull_Request_Closer.up.sql | 93 ++++++++++++++++++ 3 files changed, 100 insertions(+) create mode 100644 assets/GithubPullRequest-Plugin-logo.png create mode 100644 scripts/sql/231_Github_Pull_Request_Closer.down.sql create mode 100644 scripts/sql/231_Github_Pull_Request_Closer.up.sql diff --git a/assets/GithubPullRequest-Plugin-logo.png b/assets/GithubPullRequest-Plugin-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..61ea0ebf12dc0a2ee0181ed78780b8ab61ae4d8d GIT binary patch literal 36897 zcmeFYRaBcnxHd|WLXkoXlp;loTk+zhKq*k%gG+FCPw^Hn+G4@o-K97bDNd2#6b%q0 zfe`+5pZ)*)T%2>UFV4kTYiBJMkeTnDnP+C+dERH<@2k410ueqHJ_ZH`k>W==O$>}j z!T)~n9;3gB_~V8||N7#ut?#L6?nCeD?qY4{2&DJ)a|O}^eeJ9w95OH^$nRvyHg0K zUgJ&-4E)D-=sxMId=j&CapEzzaFul}8TEuz-l(|B0&VR+`nvdql%}4B%{8HI=!rmJCI(GN08?Yx4f^NH$S5kKD~sym9?0r+=u@( zf&NdD(bm(`Rg9O{$H#}qM}Wu0-G-M>R8*Ar4L>hGKR3Dtw}+pzr@1e;vj@{Z6aSYD zIiQE7yPd12or^R5Kbz(jE?%CJjEv}V`u{Zk5C8vT-Pz+mK|sU7`>%wTkLL~V|Gm1W zo%R1q^?xP*RsG+?V(NCjKu3K!J13yC2f7QAi~?`@|HIgSl^Xm%l>W0Mrs!mD1Jtv# z^0fW0$NsC}Bhb#q7Cn;xZSX%z)ZFcWXbk?1R+5pAk@p{xy#EO$dS-b4&(!?G=l}c9 z|1|R7!uWsS`X9LdTL}EOD*w;w`X9LdTL}EOD*w;w`u`VP`2R)jfX-+}=Yythm4XN~ zZN@lhOA5bDZtd3Dohs>2Wv-x^45K!V>V5BU8sFGkn(O4-oA1A>JHD@k+*a909^I6dm9fW|J+8zlTGgxl9Nk^~4JCQw zP(-FQf%L<-z7Gm{ml!K5z6doGKl05Ck|^I>O63y~5A?&f%Wu)o#7?O@O@DD?V?rhm z2rEf8^LXT-!G>vRhh6o^h8vqDM-Cga&ruyeP}1YrDsaTNTy!W&S}*)@tlI-Q;An_X z_?vy2Eu*WYAxmgb7QBSCtH~_il(YAStQ%KJ;h9w~zysSfJM^5xHwS2;U$VM{VPnc1`3g>(h87U3!*hNDVrHWK``x$lqWf>O zCiuK}{N+Vmp2*6ZKEgLpXTul85qs|NIFuxGy4|jqL)*Townh!x-c;#I|LDuoAN}8) zv0h42M`8O44zgHGq-ozcBt-2gD*)wQgA>G`>mu)(`_dG+BVsMw$1VnJ8^wC7u+f8j zcIZIih#6v5eSV<(Yc^RVB;<|e6`(H|OMQ&9oM@bnnk=dct4ffS1~a0vs3%unJohD6 zubF5LA(we>DXudxH}$>3Hs2n06Y&HlXAazObp2UZ9ln|jgN+R5&)$nOZ{NUydM^rz z#x-{+XHtDS z^xf8I5i1>lGh*EvUh9qV)TH38Ejdj$cq?~4`aa6+K6k5keglCYTLjPwf6?f^{dT=z zg@Pr7Tp6kIGa=Zi>Zewkk#0!yC+&@NVn8q=ad-2TYM(S8=E|_yV&Nc+p5j?2w8@#Q ztx?@SK=C39v+p9oE?=j?K3=j4FkGa`GpI$()r9erBvL7Q{=ODMPtzddw!z_hEfZ}+ 
zSII{4sGxM7JI|}ay(k~t6UzFTTn5>oxBj~4S~8-Bdu52LMG~(?(hRZ7l-AJ#XpVm; z`6qS)u@N9A`XPz4XyK-bIq%&xCnIl{z54I)4HisglK0%M+oDJPsIarfd-AFE(zmB1 zgKoI}?ZNV9t2wKlPk)&3y&izzghSJ8o**1jxgho-o1o^OIxT?eWs7A#bIJ;~Voc?s zuq-;9CuW+`vt_spz9G3Stk@Y=uTJhPxaTmHnZi43{_+BJQ^|QHKb*Jkv&H|RbMJO8 zj!=mw5&K}u)3xH#1@5ai%Yj#(NspCy zNzt>Vq)IM7KvCrJ-ZC?D*nk+{U~uz@%SBQVc40~050sa}8fv}ACi@-y&ZPzw5D%Z_ zfV_gfJJI-mqRpl*|eM^nQ(`cP%o6}u%JYEBSAQ|a`oI5KJ&wJLc zVpMgF{E34Z@(HuSEwoO(!jL~_3uh`^Ky4u_Fx+|`W6u86CFN`3Mz`)`Achl z0rAq+HaTxEELJxM4`T-&WF)%Wuhv_x=B6^HYkZ$$mkN1DukRc@qm(B!NA<#zyW&>R zB_L-jKi$8QLxEO?MapO)n>C~(5^m^lv8K3WG?d+ax0p$oZbIR{xLwV`deqSnP#j~U z9rCW$eN=IOPdacW9L@Skt^+ z7K)Y@oaD4wEZiMmr0Pa#^~=o{d3`b}W8z~W!E5)zr003$g)-g6%nGXC`09ewkTV}yJ-+ES4UB{?0No@f>1Pkjr-a%^Y3Z*D5ScA^ zG@Tzbez+gxlHvHg4*TuBrDj;s{-if?%B~t!w7CtjJIfq4{#qO->>|HQfE{Y0{Qzha zPD~c^z4eDTh*O8I6Sq8A*WFS}?&dq~l{F*NTc^3ugFIOlwrATXPb-KYHjm&RS)C=_7)V@J^Ad8!r9>%K)YG0s^iKKFxs~<^}{~$l`+j8-4j7}3L zc5HF%=Gk$O#vcXFL{TLTetU;*k?SG>F%6Wypd8#4c{~5`TLj@dv@AVP*t2@;v;0N6 z&Mdd>n*$eeR0ONKV>WS$ZEKA783MgcO@Clm1WzH-YF0@w(xgEJ2}6i@yH~LNi(O=T z1l+K~C%;{%2)@)py?S=^{c={9=wByYzV&EM8o;Be-&aQT_7O~B@y>!GbjuxLqp{KO&+gPTVFKHb{wW^f3P%uN*=JJc*5P9Lae$oX2?1P9bP{c``UhlM(- zP7KLs!DH@XLi_1}Jx$gW1q6Lrq@rnX7Y zDy`-(j`vI){~FutFUx7xK1|c2IXZ8GHDC{w1y@}qWd#8|b<{c!%&PJewE__tpBY<;yPSUayE^fD09@!PaHiBz{$H)evEGRnk+S#@eV5%~@p-BTAu@8=TO=2GDcm%o>8XD3U=B;+yYT{O}wT&q+{USq_}; zX`IJOR_RYlklsGUP1NOXlc2`4O4k{m^P1EndWsb&K7t^Pf1xq-oW0+J_zGS^Y|T!= z$Z1pcLLNn+i9CPAb@U7N8^7DttCyFKKgHv8pjw}kK=<`QiK-C*YreNV!=dVV#dFR& zUj6646YA33RnNzx2u)kXHq_QPNdL(B@(MzF=(iGr3c{u}dF-ouBzH2UpXBRWD!ooH z+vT|bvWm5hx|?pwDEYvh8d+g`Bu6dTr^Etc&1QPq0QC5@Mk0JQ1#WeQ!KHunHaHuw7ut24%WJ~l z)oQ<4a64Mi7;X|DcABel-NfzKtAeMxm(i$O2}FZB>iTpC@{|7J>_f zwrkL57-c3u5|K$@)KMfu(yo=x1LZ}J(_+5HD7`u_f2%x`kXMXjqjHvC+u_?j`8mb`NjLrG&(vIQ&ynOdnW_{q^F(#rif5ElTg^aC&ag?dx zUF|^K$IjuMw?4D8LBiFVb72wLhgbU|B&*UjP2zEP^K>|9er=xnz10mLn=|IJK%%@U zR)@&OaqPU!1Am9#neohXM!vf32|Wo{)S-y_uRfI<>h9Zu&(inY(i|LbVBQkf`!@-k zm2R&rP6KAns;IG){K7ABI5RIpewXjYYSkMdC(#U83t#}r&TrJWd&OZ(jYz-vS8?*K zfya{A)T@;F@j`fJmj_eKbkBp^$mA1Qse;`&JYBYfj)~Q8isO_e*6=w4dHm&4BLn6x zjpUi#15=#PB32O2an>8;eS$3Nu!<`4aKE($(+GMmZClrx)D)2O#>lJTsF+!YFf>*6 zRycLm$4wI8CVH=yW021aZvBOiogJ$M39E2^+P42>d+f`;fqw4IHC_F})zpO5rh_!$ zvZ(oaN|MaKxv#NKam;a3N>c+HTjjoNd7k>6!V_@CDN(c}YL56rEX%+A_Yy9lBz~2H zE4M|SJTjeZ>wT#2JZuK9NPqb?8V% z^MCnJo+co&3%^|EJ6K7Zn>;R4(Ljby4!SZB^!P9RbF37d68!^(@&}T7aCiq;$FVW2 z@+G(txz(NcmA&6pkyI|z3{%D>DLAQFL;nLu{H3&)?l(;yhI>h<<&$#)F|uL&Ks5Gt z@WCJk8`axc%`xNd+K*221%JtqpT%jzP4+8lsC-$ z+PyraDb!Ogc=zji5%8S&VDx<*R0zLOW0fU)d#J?dYLa=MjD2TM?e-idB)m>cT0tvw z2kfa!i`{?gzw!mxBO@=_*l`6t-&ciU=;jHn`nvQI`9!9;N7Iv)L7paQmRh%SH{*OE zFZ#N!VMcTCQUJzkdm>g~w`(070fW^FGnU&jemM%!yH<|NDXE*N{(ufeew6IB?HDuT zQwk}J?yakgTmND1mz`FXw2aOF9!7mi*ml1DSwwO%>YY5mpJ@2EssSnw|5*z-+mJpQeNUVIDIcH-6kg^{(lHo|VA)O*{A*03BZW_iGXuk5VjL zeVDwhCKD;>({G8A<)E`YQOd;DWOXlyn6#aD-7BU6i)^Zv!rFPC)bS5!gtY{zW zW?7pLpL#f!o`Z1Z#nPKfQ8WF9Vp9R_(7q&=ZB9=?Kfzds7ySHoe)g-iUM1#7s^d)J z?`c#F+15)x5ak=$=8Qd*F38bF}Dny_E;Ctm?9Myy;UR zd@<6ah|1kG-1pup)=Quzi^>e}RswFwhx4SC6ine4OSjCp3?50m8VC2r+i(Z!sGqoT-1?1 zyw-c4u3ELsqp6`!ev|^na3W;vZ_>AL+HA6r0EL6>-_bIhf({`%e6LDj0!2e-ds6M07xChlCHR4GqEGp?|EBGi4em^8|(H$J*3D=HRz zOnte9J^lXnH*_$nLCVj1)DQX{;j(jzP-JSOm9epq`ZJYT3UKaZ-5HRTY|Id++OItP z4bUhv=mt5w<00inQ%RoqSj}*-s_dsZuAInp>GA#JS@$eYxyU9g6Bsmdh-FNNIkUNg zsj6|Muz;z=b$hfpMUTcKcbE^?zw^9J)EUIKkttnvzLrTHt4mmv^>en0REPHy1~;oa z6P355yRm;smZ|<#JV{S(<}(q{IFQDs2O2>cEz0V;Cy!J@YcC!M^Tl;##Z&ihRw+3c zy_!{&m74!BA?{+FvYQ&w9ASajh&6q-B)7&d9^3=v za)oKh^BN(?H9KGTU#=s6+9uNP+ccl$ww}oJ+mcIgXSNf7D67T{Ia}^b$_$^H?w|MR 
zk-vqdaJ9v316z0EFs5_)wW)uWS}q7|#-G#=!Y%p0Ukn#u+i2DtHLre|qI|6$J9Q~baX z)HlWq8fKR67?ul&`t|3B!;8Th^|5I@L1GDVeh3zK=&~XN##KPBFI@^hb0Fxv8N^xr zP{lN-U)20O8E{Si^`VQh&yp+1Z;-AhZ{$goKXSK)6M4~>*q0xTv~$#BdpIbBmirLe zcE%^1Rt^Cba14l(H^Cvsq-jo; z40ptcxvC!wT=zee1E+I#hdN{r^H$hMf2GILpdIVWvX>@4@6&j@I)14e%;Fh?!kky> zv*VDdns8d|JYa1hd_a8clAJ6ZX_#vCVbkX`(lE31?RM|ia;;m6o`z5C(9^})6aPtc ztg(hw#dSpze4ClLO$@6I$=I9|)jC%QW=cp{D!o*p-3gq1x-nM%f#to4M_{}%WOn$S zB5C+zQy=MrDS4E+?Ipfq^@Ae=&@PUr}P%k`XGgj-u$X)fv`^oHp zGYYd-mH}RU$|Q)FO644>6jzJ600rMw4Y2!ebmA={oz2 z78m+suNA_{wTQ;KiK#;0zw9XcU6QU$7yBEGN-UQ8o|H$J>;M5U-C+oWN|Fd zahEk;PoNm#IL7=YZn%O9ER20MWc~of60mO7g4p_qTVI^bK$-c=vE})0eG3 z5%ti@nd)pvuxOqKHoPjT?I6GBx5j?8_j)Rm&4#`M?M5KA%p8$8CFfrwncu>7y{(qMWSHj z(PLR=l{XWMAoD^n{J@X^&AurAv9DFBXi+gGH0g5;uQpJ9`JUxd?A41)zXIayoR3CF zzijTCWknZ_wB}%@`-O{RlFVxImSw|Z2VP!QDCK;oS%Au>|b2~`c z{6QVKezhgB&>kksr5RDI%oO%_nqmyiHfaO-H}-EHBX_FBd|Zf@g)OoXnO@YRZLttfvfK-&HjjdC3F2R`ZWa$hB^wvu)F!B7t{O3(aS@Q1{DRhmjUbo~qFF*W z`lqfO?O5=Q+?{^}4l4;8Zp!0Ypx5=dodPhO&F$}H`xEZ@$DbCsn8?SsJKmo{Ubs(f zGu%kfp7kcDITbLFYDJoj2~DEiRxEUjS@2vFwA=WbjO%rVkf$WAVS_G$NDnD$h|vcZe3-pbj- z-c%{|)o$%2Nh(%Zw;+2$XVB{n(_mf8(n3GftUxEi&(Fl)F-<2(Omn+V2=>$cftyqz zMOFe@gj3y7F!cAe6~7`3-i7$>L0R2psg)Bmbx@F4fF@o$IqA+@Eu$QM2hXEae+NV- z;TB0$zWQ8Jb>r73ki>BR_c~FWj;QUb?{Ui--I;Vkq(r-bRi!=qUT&hB)S=AxW~{my zElWDm;)S36E8Lc(@vJjhDU87-FKO4F)qACX<-Cx)n&SH#=Vq5~KXFzW78raqaGc~j z-%<#yWV$yzb*V*2R~UII$9+>&25Holt0~;%Svy|&QSrZ16qP=u{|xGBniT4(^tcre zFoM@J*1YAxT`0QjlwTrerG=j!ok!hv{{74KfSd(zOf>FS!j21FM7QTK!J-v%>>`Gv z)~u|c`TX28&2u%dvg2!Eu4+P;e)|NXiXikVpo#0P-Q7$CL$vmLf@e1Oj3ZeUh;VDJN=3eN&y$t~L*@$z4xp+h;>;DF0T?#fX(ec4 ziMl>_zI~q(>>&22y0v?35K?imk<5T5hV743UOw|a(p_(?;h;SG)1Z0A)t(?4jYzWB+=jYO?03>n-MCbBXm!8ddApd9J#lK@XfJv+M+24^_rs#h z%!>N^LcXbEs`1UB^st;oGWp|H@?t*W@eMhetYRk@^tH{v?sQsCvS4KgE%&zgB}6SHbwhxU1>8!o}U`UD)&qr1CJ#9t*9* zMs$Gj;H%7HEQBI0g^?DiFBexSuS`nzvm2@SatvQ01 zvGjlNSym2e@i>^pa>nM!AcsQJL(l<}?_c)qa{llu8$Ii>4Z9Y^b+n}i&6~fvo4a8Z|iPtbraG|^V*(E*XT(6CI5Cr|--Q`sc=e#AeKouk`FEBk91 zU$?tC-O=N~PpkUr5xaNUsbN}_E`iSR)TgBdo@7SV?VzDU_G+zh_ z{u|pg#d5zKU*(SwIV=h(6mPPTw^ORSN%A?F%=fJCaS%ld<=Q*W`WK6 z&m7{UMpeD6$yteaFY$X*8ika=@U2XO|3*%_Q*+v<9H0m2bD`?PtCAfAvV&ukvW*B0|moc^KWH(Hn|&o1Jo%=$6hv@j2k*CPTywBWN!a*w~` zbM9)bog&>{!V(~SK`!GtJ(1cwqggKs;LGx}4?zo}&Th+O^`_-yBxQT=H0Xd1e!C4Y ziTw>OQ+R^nKNsgaKVv$Fop+)yuF|JT`hqQ+X0pkn$UnpHxM)9vT;{g!eHBjHrqtlG zqb}=Hf7I}$rOpXQcre1O*T==Z%P|?00IVvH26f}$iw;&1V+Y<8vrAOraT93XE3rFX zaxD-C(6kO&mOC@`w=Y~yejL1rSMihb(xtoio8PLpE2=e9WDW781DbMDg#g!bx3*oN z+ZV7qQFl1P3i2UJ|pIpQi~8x{!jdF;3LMDznE zOVfEZfYo9%B&R))*jrVfcG(>8HduEHT)t8`+HN#4E}WWNA-oJyM+;RgJ2ezZ#Cl5S z@GqMg++OaKtbI-byUm?ob{^a~Jz50e>a|&~6EQZAkR`=+n+0i`rmEXtK#d)JansO} z71NCidK;L^D34l4&Gk6o90sO6H8}DZ#KA)L9YxQ{mg2H-)TtB&TTI!t`ESkOALaqe zooz^Zq$Qz8<^Gx~g4j>;ghXh_9dL#2aeU4cD{yE7qfwLD34*N-4)54&p7h)1*PV!J z*k$4lb0I4C%J}+KZsz?q1sYT+)P&S5z8b%4io}ZS8qt_jvESC|r_96;L>b@jFyT(A zwmY649YPEo#QC(w?4)kf_^B6tk%KKPFEi11M9w{@c?Im*f$VPH{@&8VV?W)M$cAd`AK_rL$05#4HeR$jDAh`<8&5@QzP}E!?+o{ z#)IyZ8cvbNQB&k@^>FA2+XQ!s&`HpB+73+=l$|z^!|JZp+)`C2b7ZRKla6SzG|*se z|7rV4)|)I?Xz0;5@hU3;76zI95owc?Vc`uly%~Pz_L(>k7#l!Kj1ERQK%0>!zHN2C zz=-zRJ*eg-__f-5vQg!{PXOOk{*r zw#7bv)h05X9C0Z=ekmWos9C>!Uq%Pf!VcM73aby?%@2+j>$NGg$dVk5)l_TK#W)}L zhG&^b)+xj*?QS?l@4k449GY?TDASITHuWTyF=L{b{&CaMaz}A))atE%j!LJXap`S z4RCD`TLHLBppOD34T^tC?9S<@s?zBPq!Y4}TGON()g6|?2lj*7g*1k`Oa3MZPtIYI zU+1$+1J`J%lug$WH~L2`VG(#RxPUKyw-1aA+GCMY)$u-1jX5(sAKoDat!eURZIexh zJsh{)Uy>W~;;Rj>n^>IC5A-B`l`fC*D*D>&m+_nD&3r)$BRB*tV^ifR8Bi?=6T7c8 z7M4aWWNWao*i}Uo`dgrH^w7c=G1So&Ra79++|T|HLDl!<4?A8`H0L7gPqet}9(Y_8 zWR`srR`IMQAFlWKeB65E81*#$mbv!gg+-V$VdSn?NZ 
z&7J^mJvGbGrSV8q;~~}A;M(Xnu)RHXq-zkw6mfZ)DzpgE*#9}M;0G_u{c-*50*Xpe zJLW8M#d*?m*Un$#uNwcgZ0W*K*gpVhsYmwEN&Vv`S+(C_voS+KGRu6w*(08`Z^l2L z;ld>>3x?q-eX~_@T4_B!?=;^7stm8G7qVP!S=>*w3D|U+RFa$N8zqq18%82c%z}}d z^?U8&hk<9D1rqIRaue3Y6(3|;${*e~38>GZ@0)^X+Eum6meT}_*dB6pkQcSqB%s5M zXlVa-2WE3zhII}I#gpco_|m~(u?Bs3dSHhrvlmgPHWn1`Ujh-GRT?uinTlNo4Jb#Ti5@XE;DhJvPaiBrZ}sFo`l$cI5AU#1R2wRdY{Gj$n%} z(EbD#=z;g-PwJ#YgJ)&5m4;(`=v%;N{r%$a7aLL{JS*HqXZ7W~E=2vFLZ)mi#;#Pn zD!%L*iw!$5Hfhdb{^jhYRvSy6cr82%b7KCXdxDl;I_QY=oh=l3)h*3SOIw4LvtzpX z#|p$lDd(l$qt=vBZ1WO>s6$fxteY7|OY-DsL$P2M`D&aZf9wX;h;J~f1-;l$RKfQj znGPL(2-bX`dR$YH>^=W{nCAOtRSB6bw(y~LQCianF`i9N5zDeU!V=olU7_K9Cc|jk z&%C**R#_0x#%SWOcf^iB8EMD)&7*#x0*b;!c+67#6@r@m>eycpqE>kLuviiEFSJ1S z_)|0zcyVficRrf<$CXq+!(00%*mReQ{b`U5^x%^B-RLQ}K4xcA+GG8GA za)w^MhCp`9rMqM6Dk;wp{fp^rR$p&#Sa7f|9tT9gGaH(W8jLq?pHX;38`9$W`UXsE zixB+Th$E}7ON*-rlDa6Cx+RFV53^;KnPu=Ot1bkWYdJMx1fz2vRmB(=D~}Dn9L86q zR5qcNNV;_m)+5&J0RXfw$fdux{NFmXH}YL(OE|?lFZ@&DqYZiuPz71X3#fAQiy1e{ zsRZvF(%UR|F`ybb8;lx^;#1HO0{TwNxw|Grv-wAc!FMI^_%C^NojV48sAKLR?7X69 z8FS};8A4YCJu_=RvmQ{Biv1m?`a7NJFP_M*xT_H`gu!8uEB&w)T3b2iEX6+`%L;oM zj?9mxZp!P2(LtG!=OA8!W4h*)iTeG){r5m!ST_ZQ-yeVQc^R9It<%oK>(1Y!4$XoO z$I*}xF3}crV&aR~y1<9DveGOS?&vqh-4GoGw3siCI-UR)AF2~O&M`Q&UqB3*Q&Cb8 zeCKHi7Q^3nE42s$k*|R6l&@3>377y5hTy&%=NyatLl(JNqbHwwT6|pwz9H2|xBsjh zbv|5Z(5|04e%?m)k4C?rj$;RZuoblroBC1m0*?s$ywFMR+~<-%paQ;YezF;TF`Jj4 zHH*Bdp+??*%PsS!c&}=ARp@Ge;A3LEmuz#j$@x7ix-!Eu=1c3NUnUTjRkoG+rA4Z= z^XSd|bc#0k>Z4oyb;1N>MBw9m?WO9`M7mW?rXyCyP*GNnc{1uD{m7UM2bY@jkZ}cH z&!|3_QCW%hou|*3f!523s}c z1=w#<9>V@?HL2Tf zEQRsY1gJ+2`;fOCtcw}aK5f1En&`yDu&tHm-;cOUht;;5di)gw7d$-^Mb&aCoI+)_ zWePNm^^* z^|jFZtK?rdK^~i%9SypczSuqBGTI$bjIOj<=5vP6`LM$8Ziu)1s*0Qml)>+eNE0-D zbC}*+n%yT3AE|O(w38Zh*fd)F$f}>{7Z}XS@5yD`yA-_mqBhTyBLNUGB=6P@yt-9% z?&^_Y2Y)gFtoX5i`F^>kC!#@={*O*<-|NMmMch1f-w?cj${n)4nbf=+>NrGR`3yaI ze*>}3Q&A*jBSOSl=oomMi6vM*{dLya-$2v2;Gp`)KdtM#^AGGwooeQ?`eW&x^KzSg z#LPVD4nLKHQMc)wl}|5Zf=VW5(u8Z&Qu)vO7buvXR^Sn<&ZJbA9FrhCL;faRC7s{@ zPii2QWx;dBk=qQR``TjA9)u`vT-pIAM->kONQ=FE^~m12y=6$Rreeb`!{&L?l+?ph zARl5FSlQ2vW{$n&1%U@$SdJgRfmpR&kIfrBR$XC|L^m{n^Of-cUx`gFfs}l0bw{IJ zw?hqsNr|eF^C;`*eA5w!%jA-BsWxou?n*fzxW-^xL-FmWFNZEZ-8MB}O)n$@)YRj{ z1!(7}&VE2uTvzi9;-%imGQF`v7TNLSAdP*_l;p$MC~aqFSbkd@TD2uJ5;S3h#g{>6 zO&0rdBM>9e$%2mex~T=-m}NTSKfz8H^BA(sdWU|$$(aHVg2Eip8%y$;Z2;B{96Wj( z-IrwYe9I-fkpYhgEv36JT9Q{I2g!Q8-seDqjDKM1-JPzr_==VBP?DBYMkvsI-yC4# zNB*4@yT@7dWmVOC^5+Ni$CEmsNn=x8Kw};{(PuAb7)$E>#EqpqGPdSPoWTs$<`>jb@x8jz49~a07k<^5(LjCRqb%&pJb=D0b@NHVh| zd;Zsivs%BIq6eah6fGls_)*!W*kYIh3T*BND1&4?W@hWdBh$VbO&j2y^Rgp`qx9+1 zLzJ+B_n``{hsDuHs&WqUpIVZ&#N>CM>KO2M?_J{_W%(TNocc-R^42_;<>}}or+)H? 
zr8-9zXahY%jr%YUIdj%VK|S5_QY~srmjhKIc#$jEIq^3%VrnQwv1x9doFMhc?$CIp z=w#VQOa>G=|65WWy6`kjT|V8Wzzxmg(Sz7RkEIvSOi>;22nwVpzz4}TD}Pia!TPv4f|?GY=2sR?ta1p z{F8qX2OB>^$xTxkm-4Zc;b z!9(!-nuK~wiDpmw@(&uu$(bs8z*c9Y41WyMZvU*CKsyRUpU6vcZEg4OpFB^3hg9^Y zQl=i%6+KSU*m*h~!>h!@gj4c3W}IC#C)ob=M?)pF43WA*bo%+E0#c0eERjnTWH3W5arZaQ3Y%3_P57$pcM>O znjB{5znIz5{$MHQO7aam+GX0ao*sdrMUW9QIOP2Td(hUplcy= z3ep*)JVm$ld2`qMDwczFtDj(Ww5fR@{i~7@ZgfV?gKbg^*l2^2y5W`2RqQLnIGnhx z4I$bepEUjLNn%c#NxuDYNN&D#WSDZU3u{0ev2EbSL_gem#kGK(#p(QY=v>5$M%rc8 z$alcHUr4pmDqJeiURd>PJc^1imBwRe{a)B)*>hH)ndhF*$eT6rUwhE>VU=!)+0%fV8j^5(yukCa2Z%MT^ z#{h(WUB~uM*OphdqOQ^Nmu)fxJnJ2$d<}s0p3myLsSaxYF5i+vw<#r>=lmfS}ieX9IEmQ!;CV2t6M45--ilT1KF@y>ld>I@fs;fa`$%sG@}mFu4tR@*MO zOV;y|hLkQCi@F4oE+QZ%+xg?(7k{7XUA(N$)tvIwqA%Jwtuo(2%pOd=xQ7*G31l^V z!-XelUy8QfN4ybTS0*%tT~1p_A6s)YvstStZr#w-J85`G5_AQl zV#TgHlj7!-)@JaGYd)*?^uSFQe-~O=QfTHUsWOBJcZIAy2erRdHBmHV_SRe1gAU1j zLLHb!4e`@-_?tAx@Pb8tMe3|GQn!|XHA<2#st?CrM(`UNznm z9mm5{*@88Yk~Q#>63UtE&i9tt7d=7I1R~I#7%25luY9V%cg$9Tw8K`!tPT;N(*Ar? z@8=sQU1XJ-(HGM%puy)4o>z3~{!|40%!6M4=7qciz7-H9>KeD$n3ZE%X5;e=G=nU@ z3%7BwMCZ|d1*s15mgP)x54N9o>#EZH`ny?S)>_GE*^X8f%_w{p4O7=WFQj7@G_bKPZWTI_wV1IFyyZSgCjd{sniq)k~uk z5RqdeNu~=Q&WPnP5u1wBZ{MFgAEepvM4OWJls1TtLI5#+{Mx!k0RgEurrlw@*)7pT z6#!*Pn+1iKEwJgQ(dD&MO{6YWUE{8CA=paE2c~b1%rE zVROv$Mti}iWpG*CvD@rBR-{09o?>w{%Vx=NY|{6PD_Wg>@T`;q(tg4BY@Z666zxD< z<9#on>w)x-=zdQ9%}?|2hwok1!|p4`ZdY<#LK|B_LP|Ae~X{zb%gYMI)C<6Uh6C8<0Y_!J@ObloU{GV(MLl8|b*Lo;hh*dGTi*i&E z#Ho_i9hT8CJRU_kxK(b{-Ae2!LS@?JsXooaz7)ZB5CMq8R=St|-J+$t%i1T@B!o6n ziFOepH5ifUb9MzWQDLX^yKB5WOLd>@8kfWy_T+dWcOJ7vG1#*>2DxAVcgEEe!@Fuu zcKp=FzfGZE)w}C|E&m$&i3~Kw9_+`CDU*S;)J#y8jJhL(^c0PO9*QIRHUcJ(OeNN^ zB~_8{QTcG@rx|KLit=x17jzqttP6WIeKMgCU!yx zNyAihuoUA#H|MWKQL4kCyu=^qmjW)-=6dW!Nq!`DOR&n^$bK=U*WbW?lf4shiPMEC z+RKc7t0^B2gcSvm_^9@EKiBe3yGvd=)Ry={2ihmaH4Zd=j{96oIY`az(^qbffZ=o6 zFOX)WT5ac_k^u0<<=aI4#P5J;amP6JujRXow++K#(jLLJZ0KhE4eqkIySo$IA-F?=ySoNk90I{LxVyV8 z?kuin@_gq#*O~dXJG;}})zwv9cS*Zlg2Jea9w@-cPRJ7DUiP8udB17eA)E9WHP ztcwgCKzkR;Ls_FzIaqS$*`06${b=-AM;F)s#R7C7D~FX7Qu?yeSBl=d;a&_o+N|p* zlMOqDm6#F}Uu>TxYXVdTa_`WPGJPXC$Zk{ z<4;*HnY1uU9=3|+dQX`ZJ;Dn8SbXRz{x^K~B)2!+6Oyi2cO}ldA)e>X(~QNro{^ zn>~3(%SlW`8>#Q&j5U$xW@_PoPh$KPUh8_)i|)j*I{s;^yt`#*zhHF%VAI5)^2D+c zbocL0q0J_*Y)-s04O>4bOf5ixr9H|T-5sN>o~8D^5IYXz!u(0eJPU|iXS&>thy_fC z`uSU`3xEKLc?Mf0eB(O*o29%vv5RrLDaz5gWj4u{x`SF<+i-fAI(VZY`G^vR^7Lx; z^ooY8yFKIoe$se+G8M&DSg`0h-mChMRdU)cU|GLL8U0bXJ$0U+pesuNCh|7?o9ww_*c?19@a02=18wJvk~Qi+=uD%BF5F?Vn&{{HIg? z5li_0ch~gb5SI{om=F8;Vd$KA?0N}38U30M2DvxK88uk3hQWoE>_k#~*bWn7I`wuV zZ)rGvX8n-tPI!80UY=?>Me~nZCd}A~euyPwJpsVm%>l6o7QHz86W)Q#b;?sw$Fh*( z6^nP9*}`EV3y315RYH~e`#S5e^}bsiz00NK9CMp+<71nis+F8#3)^3b@r6D*$%ds7 zXN=(iGiQj+AaJwSiZ=TRp-MG{W;Qdv6KEm1b&0K)$S0t@H`#A=S>?!){FA2{?qb`v zx)!??MOGUe2Oav5@Z%9GB}X%Y5ONTcec#bZUD5j$bU z&?}phRfW%U*X;wqmfvX5K?6>-0-=kw&c(O`zsmz&DAp>>SepIUTnwCOO;xRRYpR8t*;_HeU-z1Ky($En2 zk);cpvvF@F>()Gh)@9uPRLzwSTV*I*L*WZo@XXz+oLY?F?>*Bz!Qbczi_U0fYERPt zWAYT6Y8mZe1$;KU?%E2RF^IcDB~yX}=qwQtx%G>Ni< z!vf-JZeUMU*7vGTzVQxAC(N@C?aeW_MZH*4o(@fWi>bp!NSuB*S^#BHXstL-ryj%I zR>A8)thIfQDrbN4xr}-CE!zOK6(F(~`u|UW|G&#=Fjkz7K&*k^+%k1Cqk0C(SYxY{ z)uCFWhmnWzM1=9U3(=D*x%oM(@Js;^<4>gjTT=f71kyzVnmDjHdl>gWZKY87KBly- zrnAIy?YFtdtH%6Te#b<^*LT? 
z!WPr@HobPVkrR=c`_jxA)YV1;tvJ}N$<&uuXC?o^-!6>$O%OAH&6Uid%8jbzY8vEp(+~Eb>5Uh@KQPMvN|m7W{$x2^!w#>gb6P}1-Y68 zPZ6~#^ZQG!(Tk9s`!fTL&lQ1*P(q>9NJQF;T4b~wPzF=PmOad70GXTut+4UuG`7~O zMi#%PWcEx)RmATBB1dDG9nqA<4O!d+e^3XZ_h%;?N1Txx)?)lerv?%~$uR}@f?%mh z92R`F_@DY_TQ>`QYr&u}H*WBo8r-{thVq^L07viLp_7$o&8$wz^974s+KU;fUdEmd zlLTAOUk@K>*aM1y2!-TX@AJAc&DoUB&)ZYHa&@bVstQkMA-S6eUoO2*`Hnyb1EhWz zQB(^YcvA)wsb6Df*siH9VuoV;DzIU54a?2kpQT{DuTHEzc7J;O6%T@;Ic!*JYQXGI zs7ubR+0Dd3ak`A7c)5JF+cvPn&rejj)3ih?6Rfo1mEUD2 zVdgN{Ti#gqvQ7A9IO8y`GnROt#O<=EC_RPX0i_s4E3}| z6dy1Ov9ev|szr=J7onu9!&g;b1s(XK#I;U&2NW>_ZF}@P=wLw0(yWF&#CT-LRhJ4kiaDqX<9A z@Chc7$s{hTekp;)_Y(y`^sJ~%XOpmrh26kd^@UZ+^n}N7_%hS(L%I~7^EYe0OT@-W zzbV*fo>u%#i4~UV-rm$1wPMt~#*Xh0t>m!E6Lo!Vkbqe7(SucSe1akEycNvvBbi%G zr-=>jPonS}=giCTwmeEHUI3NM$0E3zG#MT@2qe`oUM8X0ZQ#Q1GWXGsPSZ5$1n_8u ziLx&&(a(kq6Xpom`~7R140~EuxN)3xqyr!GJ3uPL^FDU1_RCxt_g<%mtfBeRrnF^R z=ED40J9IpKXPdNPfyTIiIkVK_V0Sz%sSg*o3vYT+CMKl;HXm}je?;4`golt7u9x;T z>rED%u@aW|3Hm8P7u~A%P^hk1S#ZIU5~v>Z{iVZk{IasRbI9(xo|7+`4p1 zN}E4!Ml?#V;lxatPgkTVKHp%D-uqBm5AKC`XiR+CYH&Q+NmAFE)Am+iRzlGdIEFUt zosBEO?;;8Zs9*K`T}jZMUMlnne)MMWSoh>0P&08RCEsp9Rkfzz_=K+_8$%70?z-1v&tC^SSd`zp4zjmg>$;DdxldUD{DGd{~f_@C(Klq!!T&#)K`-%ENcX zGa?M0*EDO*mP|DB1+-RIDV`;j4lviL0Z32Ri$~{tB4o8nOZ}mT2C}l$U$p2OrCjsy zzTY6h-%nVpx5(*=jTa#8$-K;IuRYcnj`{5{wNUyGxD9aJ#+LW=$A9&;?5Wx3E$5NU zuQjq^fdus@-cKB2)rZS1+JHA1|2 zG}Y$5Cb>UK;?qMYBuZtwMcE;Z-5;I?b>_v)Y3Y0cpNZ;O?KE@J6Y?Gi;*q}Y*Ntb_ z*3dP6TFRt)V+mWo!w(BZrj)v-TTNP^p|x^pxVR~@*BdV7&tWRvsHT(SbG&5Ljpk!Q z$4hrv);|nCDvY9dXoql(D62}#3MA%ZwZ6`lfiBMdS+k}~4=xYd)r?H}nRliv!^OLm zhphQ8>s@Sgvs`E;8N*6?<4Op!Xg*0B?oWmKJ=D7(C;5O6mDn@uG^Ot8%u6MH zb3!e+I}Fh1%(M7=sK>uj6%Y}m=#RX~V&=-9ZpR%baH--%R*2?s|E3ewLR99kzRga! zzW9p;o1=sA2H}J7@=A*4h+vj}GEQJJurPor&`=q$JdBzvMeclOB&`LM1j~>o+)$&s z;xv|~7m*F>tM3_7T|MuS40y~bcDOZG0Tfddl`DJk+-PlT%_!ox5+j41W4NXRpZe!dlkAfs1&BJUF`je0~GaNpK9w9!}lv$k%!rXi>G zb#1jer#+_pY}`>vJj>WXMNvs3gm-F&X;5GkyCV z(@7kUE@c8MDfv!n;KNs${6W?%FjQWxZtmV#ZMJfCns2&|M|Q$9rzl%o;N)$p<<7Npc7rUBiQv0TMS0Vn6i~VtEI%hHv3;ZS)Y2#9`aO@ zG;0z&Z8DUlw?z%}PXgHO{0h2=+*Cbm{GxKhew0Xuo(1d)>#XHgb(=z$fUMOKhqc{z z843gMMAfXLqBqrZ+?MpWvQykzi$e@n_`18jN^5rrYPm1N^ZThUmJJ7x!j2?<+5{%v zqU%|^yd9z3=2Ac9_HSuWyL*r`etKBLtv{(^#XZQ7G32!&`4RvUHdXzC z1R8wMkBMl&x=*Me;5|F82hV36@#%!2<+BHsLkJiTH#u+RRLZ`Td@EGFTAH)_WTO)O zOIbtLMT?56VvqwxTdPK{(2SNVdgB&N4jPp~*?7^8>xz6Yo2*8TlslRk8ak3RS3Ce- zx{OEh!L83$f^^uNKn~2YGHhs0?ip{Oea&hYVOx+otx9(@c%m2aB;rUfDQA`T&aLlz z13{s+izLfPO!8!Z=n6j$aU1(2)8@_H7piL)AQDBIxBHFkC9D(d@-)c*KKp@&P?KGd z|5c9HC)-xrx)?s(_R*Vgk+l!aE@Q*PyaC#&adVlNa4Bx8GF|(&wqCLGW8aZqUz!*L zUs;cvA7|K}qpoUpIam&)tXM)V|7E~&r%*4Bpr@WV#YV*e|M}vXo!@trX?~r#cOo!G zFn$=JS^6>-4pCcVRfD&-Xe^+`%KLUsO5a)N3z4@eQ4y-i=*atYH4x6@lW~kb!k;!!KHj@K7O=o?j}zmnr}YHrGvi&HnSqJ_qD^JayE~L^(f0Q zUvV?U$40bhzg}kMzp4|eM|ptoV)khz$~9q(Pg~HZl?Qs?2Q%9}ZL|47o7r18wc-#q zF(7@$8F9+Lg!j*`+()N*8Xvv{1WqnZq{bHZZe~8ic|B3a88LXB?Jfymj+YbGDeS)loR+IhhEOnfRza=BYRpY*F z{wCof6H;4)m4VR}Y2m%f%5yh0E`ghlaBf7xd3442cQGg9Zil2>_=bCbYROrhTbW5u zsdZz8WA4^7>?fn?&puZBZbfL=QSjZc->O9MuT;!b-|PXesdpa}{Oyis|M-Pl^y5?q zM{`sqxWE;*LjKI7L0=e;4NLJ!3vG~n>~p#EFE}X?pG#$y5QU9>cAtala=I52nH-52 zd8?u@A6ex%>K4r7V||_R%4V>M9%6*}9Y={P1HCEna+_c?FsL(zy!^{)??UWRpsz2F zzNId+s?W2fa=ld!Mh3}ynQD3JF&srRd$l+bC+q1{eO24TcIi&I#=2dpZWN5%RZ)>t z`ENhhLfEO)KE;W@3cOmR#(4Y0>WXi1zr!vKOcmSK`vf^D@Y9)IDW0C*y+|^MQW}ur z_nm#CKl6Q7nhtbBXUuz+$uZ}O%BqFxuxc?7j6Nrl0c9gxRQ*M5M!>*R6t#k3LnD`Z z;q%(#BJ%XQPL#`cMw|z0sT?$&Cr42H+?Pw3nd*Mx%N%7g; zoQQ=e_}T|ny?7oI<6h1w#GlvF!^wk4L{=5DVk=PUraDZ&G=9Qi!Q7WbtlqL_;9?(d zwqs1QUHWj)KT4yF;EFtZ*J2%yf+J_9q_R0LU^$dKX~QWps^giH$jF6$ETb_(_U 
z)&uip8|6BDA#n;%0s;T6h5_pU3{ROHqW-GCT(1C6ID^jkSLF+}8bj>}E0p9r9g;)E zSci?31gHsbob2orJ%*{S(XG7z(e{nRa4imbuDrsP_FC%laZu{0q&QfZ|0RpW%7v^J zoOg4l-ulN~1hfa?)uRX@{@r{S@&TIqlRQ0>it9CzH+#8RuJ=};GT^mKLA=RNgT012 ztA@JTh@-HUG9~U9uL4?qhDANj`X5;S>Qj}C*2%WK|qgrv1~Jt>wHU)TXG+M z>Y74-&UXYWT5pNO778ak7YX_61Bjv8^c`}66pnm79nARPERknBb$B&x z6|AxJ=C#Y^c01=<@K0sGv0ej`C_!{ZkD*FfHPJf%pz-5j;w7rkLTYzQeDe+SJ*{om zn`zV0I!*<}2sW)+tBPD&yN!ddTsB%Mg}V}AqUG{_3B(zpwYPB^h(Lq3lY>swdN$*x$KW%E9BQdMd!yjB< zU%M~ZaeargdC{NkhHmw~S?__TjT|&5-IO&azHah;WlzE{wdWdEZfV<-y3OoIgkGZ}>0=*Bq_Di>-PwzY7D)_RJeZPEPISp;gGSWmNc z<5dU_bf93!TcS+IqEE8 z)~&ATND*Hn4;_#EH(7GTJtCI7bbasm#nIPp!>HF zRh{4YlG!9fka~uwM2s_7P=gL|7WfXKj}k+f4n|{;Xzkx6cP*x@@lnrd#jKj?qq0ll zmE9)spM;;(%gpVzBSK0twjKS7aAu*j_|YPgm3pYs&Y}zF(*qX3yU1Uldtf zQLX%G6CTwt(Ve`!AwttXvvwQo8LEG^cHsOr~S4FfjRQ3LM1?LZk zc20xlvSMMsw-Zpc6RWv0_7EPHRw3DpVj2BtYMSE22alnW-=zHN(L^g_R4ehuhpJ zI@-2DGvpE!u+cD&{XXJU-CxlrOOH&CYuonY9mwjG2#zE*HMa~l#b|>5I_1O7c9}%h z_7erBhdG9lnbr`hKQ1gSg#LAaQ!y250>{Uyl*w_vsT6^DEM0@fK^PUIK0_WX%(aE? z`#b4Ir5NeIhvFXeo0ZJZBvVTQM`pUjL+oi?@tzI)J~k?g-m~+}H(@Ur9z|&8abKFG z!=8)LM&t?A=dwsRk%B_W+}7pX#Xl5>q^DN_a3T_4eE+Q&Lfnjw_1pSwvwAe^(=aTS zI8H&vQIT61IX*@7Af4zyISSSkW)AE?(L5g+uDilZrG1O@3Az)-h^9vG&K~=O7ATyT zvz4xpag46+XR&Gn+%k5gRg^{zrE^`rl$F~@P`1c4RSu@`w$ex~~)eq;D zG6iHu={1K!#papQ@3r9(2y)0{V&%k0M5)q0w&4?riF(xCT~(wwH)JC?d^oybjOedo z3zI1C-P-0mV3D2=y7A6@iah!q(^D**;Ef_I$GjIi9^Qk|L)u}$E&K7S9Q}->A>YeA zuP+H3O`zuxxCbNhS1e|tV|=bd)c5O;aYGZ;Mj641)*o;U3x28mDb;b&7r}OEZDBqz zzPB1A^uu?q#cV{aF=36EK~5peK{RJC3l(Gd0Cm&SV42v)fJJ$}v{0O)$p|JP5m3FV z<8j8y6oq#so8#FBpw+W5jBe;T34`x6l%cJo11(} zMij1;>Yy1+g>OhOVg?~H=7Y>s0<-BT`xX^)8p2PC38Ot^(m{1)9?!Q;9xGWqLzvc; zOI|7MAuQ6aG=(6h6NyXM(jY}i-c;@Q>y0ZEP-LP{rmWs-v&nQ}S;FO%oHfgpcwvxm z@m#5A;(HR@tgDi_x)RMvQfg>z=->#O{jB%s&l2JiPs_SFJp`dS%uElX>@TO;@*qqj zwfPU}lv1$?b7uRNPGmgR^lZj6(iFg+)_1}pQ*~)+rWMJHkkAdy(_~PU*vr$ZIb;c+ zmI6HN@~ZiRsI-{G^rGlevsdv$Cx{CBtJx@|LUWIN>AgDDkHS$eYEujcbK4P>fuUet zCnO|+N(8)2gp&Eu)DGhG6_ZJ>`@sQB1d-lz_k{%;_6UyB1v`9d_rntL-uU7;>8dSa z{GW&+(uTpq5Z*zpFVM-~xJe^En#YKsWWP1Ot&I1~av};tj6E0R?@ z64;ckaU9cW?>6y-zu-aBQV|bqZ}T{=K1s4HF^};w_a|S2PS>>x+crekB6YEiIljbd zMb0)K5VoHK^)#{aNsc2Zh~9>NC9k(`TpKX>nm228LgPAsfoNK>=teAQc2Y8WbP%710$SSL;{W>k=(7+3v^h;9>9k4Shzj^u7A z=`Y4x-eM{k=4*9ngS*jwec67g{q64xFIN?AT?yxf3X2>KCu8Lpj&d7lm$H|$EMB+v zx&5QtROP-XGv1fvNIbtWzQVsvMOb;$l1Crnh*K+R2v>D_qpmEr5`NSbCCZ6qEP-iiW+HqrE{*wU^)!6ERP0 zd++ApSA5C(3Y9~QYytnMn`j;faTJyt;6Fc5R@euC-G&Z%^G zF7VU#+3Ub2)-Pr@+$N8)3S$vNIVUTt9v=hEY7c|;-F&xVMEImy8k4vSJq7|xzNjPA zM4Nb1LSC|xBzb%;xVKTF98|Jh#|BpvJCptw3*e#e;sBqN{>~Ybx~e?Tj!n7-%=KE! zTh-c_z%;|RSSfm-8Q-;}4Bf!64P8n=j)u2cHV_Cs{$#E4 zE&duuprl8J;rN?3W#rYfRy7ITfuUISsJhQ6>lodIKXf@EZK@(N!R9GyBbN_5nDQO3 z1{mChp@|p$h&E$m`c1uVNjrH=xN%;fM+TwzCy5g6lg4#ySR1zko|(LS%}F*5#A zI)~lSY@Qz3PdBUNHG1<P!v9EL0UO9nZ8&jxEVG0I937v1F+xbK-r?tyx5{Hh%$zFX`YQ{>$gJ z^2Fl01Ag(pcd7%>G0W}gGff5lBhmL$55FS0I7tFG9(X8I>EOl1;+zo%^#62|3ri-l z9A=7O@^ij8aF_hBbRk9(CN32HcKGmmF+O+kVD7@Kr3725#8R5@_Lz?5Y5K`c{HF>` zz6y!oWu!d6>#Qff1P2JFLY1co`W^_(>J$ZqcKwQz)B2JQjaOnd^DF9>vi@UHAvwM! 
z^->h6T{ga)X*`{dJnN}?YwdvElslT3U*FnYb(XOk=Tk=sP`&xR2_y^@y(AG1|Sq*8fO3PGg-HJzjxqkklHJv0S*^z`kZe#CP zigEe8E*E=kG81Msi@E1_Lro!rvKR?@Sf=`nfBREh%iKDN2_=KZgn%C!jp=a&^(vXc z+R^?a7nlCPvF=47;*p2{?P-LS5I}?uO1Bhp=10xfJV*(G#9_XbabS%32DkzsD7^*yMH?uA3-~EaFLdS3K{=~*BLaljGQ)S!$t3V#n>jVc+3DfQ}{gV>L zz5-T-Gie!w@$xcybl0OZ2N-CkCZy!5X37b&6bDn)kF_CKoYK94XX4Craf3f1Y4116 z9)V10&gy!egaLNCLT@$(GPdkRF0|9icn7gk{Z-G8AuWWZDu=UtZ>Xl=cNejZWXPWgK zNU{GcbN#k9dB)3VQT6ae!Z6YAwfAdXvoLY8bG)p9xcJ+(*Kl_4oq&vgIN*iP=!B>s zU#RU6B%=qP!}RjKnVlVadPmtqxQ6~rr=RyZ`ODUAs2eY;TcqNmxHy-eMW`CIvmlZ^hm z9ldGf=z9c2-1$IQQ<^7Cn*0@QG%Rhz2W~!nXJ4MG)urA)<97LN8{$qllJW%(28uG) z#8Zryx`X4VnH#(CW&`e|h|F3U+#`IEkgfUa#`1&jh+Sq5yqva*zQvY|9yZ70qS+y< z%6rgImd$eQ(`gG<=? z!T91HD6KZB&LpREQN3c6%TX9BjPd?lLE!Ul(_U>xEG`}OF=j#`*1L}&tislAKxIQf z0Q!FSLIXfp-SlW<=jW%T7rVw8@v*}Q4?HzCS~=U63N*j3BQXGvg7OIHhdyqGBkWH4 zmW%*_p%M45{QY0fWRZprpCyEmbbd*R!|Q_OXziE|ifyeK<)QbwZA8u|%EOJ0r3Is>AOWcZ@5kS&C~8dfD8ZYdxYsFVIy&SBIZ z^}J~Oi2WIRY%2J7QBKCu=QE;zaMMT>+}KL$-fQy+xnF+~%#e#1dOG=c{-X)4@6-_T zN~hm)O=HJh0duy}MK*^m4TKvhZVK8NRunS<9=_@C8~S7V_!f>Pu>PrNJ2aEwJb#h;$>Pn>@| zDk1Fzru>iHhQg+j#0W0&Xw}xv%p4T)m}KwxuUCb#-Q>R2!16 zCQ1920HVVg#ZO+N?QEnaW`pkWfW#*1Xv^Uvb0L$)uAlqCx^84U!@$XTi+`FvyseXN zzEnP}TtUn4v5Jm%%3L#bz``F@m(aVY8t}#mcTmL3`stbTMg@wUOIni2J;gzxJ^`*} zitaA*W9m7HNj&(<9n~~C2ma&?mka*0|NW)J1Jft9ur`DAW#$~sG&riKA7&YW^OnN7 z+yjMsrgo3QjbD2=e|}kVH!0X71m*A&%9vHO!pUkWzOU`;YShs zzM9LLLqC%5UF}qTEg(Q4c7>%k{OeCHQA*%jCmL2l? zvLbKi@okBGGVh~B|CX>EW~A~5Y6>s$qt@qX9*=JtNy=-M(!1h1S#8PW2n(s|p90e> z^B}vA;QNh^W3w83a&E4EntRUcPof-ZB_XY^P@elyq9RMi#yN2EHy-5=&C-*>C1~n~ zvGBc%Z^kh*{iJ_JB7LxJn1GA0#TB^`kz+pkefwivN1!f%hcJyp4G-T7iT&B5yLidu z`^cW{Yi)oIVJes}yfd8A^1T;*2=TWq6c{0dEL+t1RqnBz*I_#=7m;Le=H=)55=9aK zoyGEd30l?l8s;K^1C@OkUFtSs2%_BXx-Y^y=Vdm>UJ&XbI%30e?AvZwT1(`$9O>ZNNH4vrv+-YXoi)S$Z&{o^-aPBC&!6QB zqVNJ`aKv?_dUEJh&GIkt2UlZ8&%p{d+c<7=Q(BG_o;RtI)-betilVc|7Tgi>ejSF1 z4L%PJH-yF8O_Uw+l(YG9uNjg8Y#i)dyU^3|(~Ss$FS+!Lamt)>KmKsk-O;LwOYh>E zV<`!FeiDZ{70M+nL`XD|E0S>=Gz8-zK55d$GG(CXS<2}MvGATkZ~T_VZszCQ_aYuI zbi;^BMV+lcR=FvxVMyfZ=fcwMZbzT}u6Ou&>#M}wGQv7I;@dQ}g0bsM8jn~QYz`Qj zblrxz$P(TXwH%6$ftm!-@fbmKWdiBhGR@OGb`#BRUTGaqQ+B>Cx8#6Lojzk`YXzFn zbJUzZ!1!vY`ne!;eOp1p_R7uE=I|D^^tDzqGZzL{T4hAas(rVrqb9`7#}z*2tcqf3 zOD;D@wXWML!N-%q?lVDmQCWCd)#C87KpRk_hP^pl$(X2?_gl9SHH4@Tlmsq=TrNL? zo3@CEwyiq|dtJz*$!Tt}3}@)D_T8s~T% z-4bwIdjTDSawkGDI`;lbS+hm~Df)RL6jD3y`~`*jFd@2w8g`#|I-a_Ltjs3$^~M0O59rpA#edI$JTEG^Z4t&a$TM<-AlEXXJq2 z-uJ9qUpuO^$0yVq!gI_RzxvWWE1NgagX{gTr@UOJ&`!ov2tm%`$1FmEcfIj_DcS`# zltuRm;@)aBv$@b-keEXAj5O6k#HZ1e!!MOAHgTYHUr&tyf+0{C9_S zydEmKAB`rXJP?HG%|NUr`@itKy(-;q>zu`DtjkGiFYT~B&gDwHGjqB{H0<>jm&h0co&mmPS_p5x z)faK?i{z`6P2A5or~(t-H+n}NqtnZX!n=^@fKUP7&`Ox)_dfb}^D)e7(L`e)+E?Lb8s7Ue=8npN0$l2Q)*Ya~ z9WOS4wvSdOx^a;TGw`;;H%)|=8R^fwwZYP>&X)R~t~_U69s&Te5v40m>549%=g&;i zZ)Gk+(2%x}#G+=);@Os7)p(L?`EuYT^0ZYcC2YzTx;4Ix-&Zs4QZhnhwf|Mp_cAVF zapvqJ&8OXqezci5aim8IqQ$Xi9@@s}T25E3Pu8BSOi2A`SODg2o6=vFv({@}@9NMO z=D{5vwbCLL&i+Zd^z!DnzF_%!Zr(yRa7Ibx4EFNPY~xG4L;KB<65)a#S;|KfJ*jSm z+6bs-4OJmKE4CN|tw*N^LF)lXkJm*eY|WMNewitf6KbZDK3;9X=E%VjvG9{=iD}f8 zog4dmKiTwCKT7`7emx?MiyTm}8iaj7LO)lTOgm!Cv4oR3-lIfx10(nE{m%V zyyp%5*1KDU&b=q_Rr z)oZR9r$_ISAQ(!oC31j%q%{k_DaEogbOO z+E#fBNZ@YFw*lMyWGX_!Z#)!Y$GOd#HP|kS9cCKFw;O*`Pg)E)7b?Y8%wX@73QO

Ai=T>$n})o|s%<|1Jqj4jP4s=F z9Nbq^?iVPo3Nm`}ZnJkplGoU~Vz_W5z$RQLTKK!fFfOZ{*}v_!=S%WaIh;7Kk|P93 zp8hhx8SlXvO z;G+5!HI;w#fu*vb3_@Bc)fKeVs-UYD7kGaR!!IxYCd&LE1dPm?6NmMO(nbf$j&@e@ z{k4)t*do*7oyMHgfJvmRm+X_Y<>Ak~Phb7tbWJ#afk1?DP>ZBNOU8iv$j-^Q%+p1{2kgYlco})d^Qw-|w)l@Hr){K!0&FviqZ5d7 z#bPXzLoTq{v|o2kRHo`3K3J2FPN`{1^_#bT4cNc6c$(4fx%_r>PsqtZfsqE!YK18b zXP5{(=I*cCrznWOtFmN+QCGvge_a6Vd*cVioAOmrC9TiZZ2Y+EX|~-pGqfFI1XL&H zPaQ9kNz=*;bWzbjP%r%$fluqoxf}7*5xkG(7{CgYqozW%{pHu2Vv`>)P&yVhK1B0y z3o7#zH74o%BGO9qON9}qR~xf>Ie5RK*TF`CdQVUyw+;R6N2LG@a@)Pj36*@iBvO(a z@UbIAlx;EJ=MC+LFf(we`B zAX>TK7LWQKbM%2>N%@;kOn*sRjo;I?5MvF9*DJjYzI!x|7L&w1Y8o%Ev|5~i6Z~v!+__C6+v8+ zGc_X2S0oW`a$V!9x3xHWKzk4gH^AYRL>@u+?@}ic7Wu_q=nu)G7h8&8m}Y%j30tv4 zQpSiT^<);?C@8&s-Ep|(TKTxj)xfU_POO9HMM6S~AQOLjXJ(Zik(0QmhSQ)@ktR*p zvng}B+TGD8_#x`d`u&3=u($^ZViJ{_b2p6)fvegxl=hZa;?MOJDU1&Y4EEt=q{-n! zhHS3?P4onz!U_%0GMsJ%GN1{h-x&Y*hljOxA{zPqOZVPh0$3pn2vlp~tA@uNXQ+z_ zY+du#$|xK|$1i%kP@4e0f8#GZyT~}r7-qEC2A){C1nCBvJoz|HXb(=lltkY6 zLrRA|Nhv->p+_8*y5QV`#m(4fz8?w(qqVl?bUYsdW$SUH?rPBb{&lBNay^boMeV6U z0Q6G@o+Yr^<6$Wr-S4SO`sWd%{_6;Xulq8W8?QDv)`b*X>hZ_FsY}keg4@v^P*I>i ze=v6?DI`9>&NF(X|L7Yv+tU(}tYeLLif-{AMAcH4P*Ni-ZCmF3UkkI#^O7ZLG_?A__bGf8`IgPXE$U8bSKQWb zBb#CICo^~!HQwmJwD4X3>G0OWczGb=5q-ZeQx4!NqzDT4$H}V<;)#C`7`Q41oVC(C z1|~-m)r(!n43Cibe0#17gb~bWNUH8b`tP5+S%HM=#5wiWX@G`#rc<04$Z7@-_xH9> zRc^MwVx0u7@u^eTh8s<{>|+IBbHS-jcdG%@PX|S=HEuIhM`>?2lb`5;foc=`yk$42 zZl~&I_iKv#qrkwVg5p3>4Ji(9NkpRxOxro&2EJ+9zrp|pBtZ0VxF+7!$b~r z11D4L*NV`oh!*!FB(HOk zsHrHGpZ{7F>&iwTQPkcDNpEQX9g?EIBF4v-^?P)snfH4Nblcii(m-0Mm~96jgTc!9 zx%=NeUtWKpvTow%$3|dAviH~cKOR{73qrGTZZA%8ZdED+5mWFK{5%TJQj+e(GF|$d zuW3xhIbK#X<^Z>clK|m+sVmd+$Une%F|)2v6oRFpW5bV)4?}Mgr@q=s=3uA0Y*min zgiG@l3R0s;J6&hINBGJ3Wbs)cmC@#(NZ1OKebCN$ztEt!)<0{HCm4iRC?stO8M_)% z6|F?L=nq3c`FKx@0FXEPN5C%7V4ss%DeoYj|GyT)uH-a%5u%D-<7gE^B@Mnz&7)k8 zCjDqpHeh)9>rfXng;xScL^7`usos?Lh!1R(}X7DzZovS#PPh zKyty@;!?`|ZUp!4u+N?K<6W0RFru&=rSjE`HknNuz}~&p@()r3{4+lJM(6c7Hx~`| zT%Dpq7!10rvA3lkUv+tMu>2#b?lx^PTh9z!tIvKYc&!Op9^C`Xicgf?j>V*f2+CdF z_G@>6VQl&x(7Y}QAqXp>lt<2*g>r8(m7><8wqmW>a3t-5fmSXw|-%)=pq=pdGpHy6E7_4=pxG5+@ z*|iifXlO70k?9z{{S7{Rsf*YgChy*{EB^!7B@@7mbP@CW%CQiMeh&0~BzbA#@c)Ox zZo!9-znY{f=<-N5fd||%3fU^6u|c%1ZtZ|Gmv{y%>Q>tUF(`q(c@(T0HHRlK!TTHu2#!TO zAUKs^Vg`n+i8lk9{kw-~J@jq>lV*A_0}8u*Gkdp*MP;j=3cMjIe0a=LKT&b*MQ~l^1y%UU5hN( zmyp_}*ape%{wI$k_NbYj8^E%d8?B>oFS{;O-!BeqP56XHV|>rPs#0;4hIXVq30Vfw z!e!HbPD8%WS^ok#>cBO1rg;Nk{lBoJ%79fB)GG~5zDVrl@p06zAw#^%* zEiQ%VYZMj>h7r%h-9a(g%iV6DMBc)q8~#t)&C`Bb?}MUl?_0NRWQ98*r9f8#okr&o znBeLf)Dki|2FFs2srk)|@CfdvLpYyIALVSpwBYha!y}92To6r|Upor1YPxq{qPy?e z?Y1uTNXgf09c!C4JUTA`tAOK;b@^YbGxj%c;ND4z3aJ7^mDwT2=+eNWG!q8Uq6+hR zz`=QYep%$QB?+i!Y@UAq(*-5Dr$25wOxgk#a=9Ddzp-p+xG7wYX7t_N`^hU;QZHSa z!G!vb+PwkZMvuXdm?Ent4EX4Q=nLqQxwFH;;)!!ZOX4sXcQ{T$p?Azd^RMm@B;@OK`kiq&%y^9Ppza^JW$MaXeueqiT>AX5B*M=d$p&38kC6a(7I zIeE^=TJ(k+XRmH7tpYI}K=XMh?;h;MJuK{_tmKrS?8^RolK7U~Cq~x@8K3KabFB@w zSNF+*z=Mbnxx&-~p8mS(HKxSFN7=L8J2?MO0H_03{B+RW?ve9M@rWN7vXIAESm(GR z{@LDtTK`8%qlhPkpdKpjeyq%I@0;d^dnS2kcZJcZkeY9jF=%Bk#JBFi_m`z%;+QXOjQiR%ZKPjlH8j<1-)9<^{;;KY~z4WUmMnbQT>3dOS9*$#U+=1ulI`JC~eZ!f_=*ppweKm_yQpf4r>y zKlW*G>mS3Yr~{1jv6dzuwfev-_wj@lJCM-QR=`&SPOq2r#$9$$MirXHZK?oR~3b^%wX_SUaEoASQkBW>>h*(Un`MqTn za9(n|(P*R;40L(qv!-9?;?qm?w7F;v7oJw6l$WgO^5`hI zwB;pT?Ow9r!oU=Gq-*q_2;Th@p7;;72{gs>1$d6YaiXK2G%^2jRUrUB)Qn9BNCeZ> zkOy{Es0W&iCm5X!x#fXrrlWxiS{sjI&q$pIc2)6QVZDC~i&2#5Wg+;ucG~hbm2thinDg /dev/null 2>&1 + + echo "$token" | gh auth login --with-token + pr_state=$(gh pr view "$number" --repo "$owner_repo" --json state | jq -r \'.state\') + + if [ "$pr_state" == "closed" ]; then + echo 
"Pull request is already closed." + else + if [ -z "$GithubPRCloseComment" ]; then + echo "No comment provided. Closing PR without commenting..." + if gh pr close "$repo_url"; then + echo "Pull request closed successfully." + else + echo "Failed to close the pull request." + exit 1 + fi + else + echo "Comment provided. Commenting on PR..." + gh pr comment "$number" --body "$GithubPRCloseComment" + if gh pr close "$repo_url"; then + echo "Pull request closed successfully." + else + echo "Failed to close the pull request." + exit 1 + fi + fi + fi + + else + echo "Pattern does not match" + fi + + else + echo "Source type is not Pull Request. Skipping plugin execution." + exit1 + fi', + 'SHELL', + 'f', + 'now()', + 1, + 'now()', + 1 +); + +INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Github Pull Request Closer v1.0'),'Step 1','Step 1 - Github Pull Request Closer v1.0','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Github Pull Request Closer v1.0' and ps."index"=1 and ps.deleted=false),'PreviousStepOutputVariable','STRING','Use the output variable obtained from the last script execution.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Github Pull Request Closer v1.0' and ps."index"=1 and ps.deleted=false),'PreviousStepOutputGrepPattern','STRING',' Enter the pattern or value to be compared to search in the previousStepOutputVariable using the grep command','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Github Pull Request Closer v1.0' and ps."index"=1 and ps.deleted=false),'GrepCommand','STRING','Enter the command options to be used with grep. 
Default Command:"Fqe", if not provided.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1), +(nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Github Pull Request Closer v1.0' and ps."index"=1 and ps.deleted=false),'GithubPRCloseComment','STRING','Enter the comment that should be written when closing the pull request on GitHub.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + From dc0cf7d0a13fe105448450be9fb0383801872e8e Mon Sep 17 00:00:00 2001 From: kartik-579 <84493919+kartik-579@users.noreply.github.com> Date: Fri, 22 Mar 2024 14:28:56 +0530 Subject: [PATCH 14/29] chore: Image scanning refactoring (#4802) * added sql * updated event type for job in workflow request * added comments, handling for global cm/cs * fixed json string global cmcs * updated global cm/cs spec * wip * updated sql script number * updated sql script number --- pkg/pipeline/GlobalCMCSService.go | 2 +- pkg/pipeline/WorkflowService.go | 2 +- pkg/pipeline/bean/WorkflowTemplate.go | 1 + pkg/pipeline/types/Workflow.go | 17 +++++++++++++++-- scripts/sql/232_trivy_alter.down.sql | 15 +++++++++++++++ scripts/sql/232_trivy_alter.up.sql | 12 ++++++++++++ specs/global_cm_cs.yaml | 7 +++++++ 7 files changed, 52 insertions(+), 4 deletions(-) create mode 100644 scripts/sql/232_trivy_alter.down.sql create mode 100644 scripts/sql/232_trivy_alter.up.sql diff --git a/pkg/pipeline/GlobalCMCSService.go b/pkg/pipeline/GlobalCMCSService.go index 71e7032c82..ffa73097c3 100644 --- a/pkg/pipeline/GlobalCMCSService.go +++ b/pkg/pipeline/GlobalCMCSService.go @@ -36,7 +36,7 @@ func NewGlobalCMCSServiceImpl(logger *zap.SugaredLogger, type GlobalCMCSDataUpdateDto struct { Id int `json:"id"` Data map[string]string `json:"data" validate:"required"` - SecretIngestionFor string `json:"SecretIngestionFor"` // value can be one of [ci, cd, ci/cd] + SecretIngestionFor string `json:"secretIngestionFor"` // value can be one of [ci, cd, ci/cd] UserId int32 `json:"-"` } diff --git a/pkg/pipeline/WorkflowService.go b/pkg/pipeline/WorkflowService.go index a2cd7228d5..95824e560b 100644 --- a/pkg/pipeline/WorkflowService.go +++ b/pkg/pipeline/WorkflowService.go @@ -204,7 +204,7 @@ func (impl *WorkflowServiceImpl) appendGlobalCMCS(workflowRequest *types.Workflo var workflowSecrets []bean.ConfigSecretMap if !workflowRequest.IsExtRun { // inject global variables only if IsExtRun is false - globalCmCsConfigs, err := impl.globalCMCSService.FindAllActiveByPipelineType(workflowRequest.GetEventTypeForWorkflowRequest()) + globalCmCsConfigs, err := impl.globalCMCSService.FindAllActiveByPipelineType(workflowRequest.GetPipelineTypeForGlobalCMCS()) if err != nil { impl.Logger.Errorw("error in getting all global cm/cs config", "err", err) return nil, nil, err diff --git a/pkg/pipeline/bean/WorkflowTemplate.go b/pkg/pipeline/bean/WorkflowTemplate.go index 96f820262a..23f500c9d4 100644 --- a/pkg/pipeline/bean/WorkflowTemplate.go +++ b/pkg/pipeline/bean/WorkflowTemplate.go @@ -37,6 +37,7 @@ const ( CI_WORKFLOW_NAME = "ci" CI_WORKFLOW_WITH_STAGES = "ci-stages-with-env" CiStage = "CI" + JobStage = "JOB" CdStage = "CD" CD_WORKFLOW_NAME = "cd" CD_WORKFLOW_WITH_STAGES = "cd-stages-with-env" diff --git a/pkg/pipeline/types/Workflow.go b/pkg/pipeline/types/Workflow.go index 40f036ebd7..7576cefcb1 100644 --- a/pkg/pipeline/types/Workflow.go +++ b/pkg/pipeline/types/Workflow.go @@ -211,8 +211,10 @@ func (workflowRequest *WorkflowRequest) 
GetWorkflowJson(config *CiCdConfig) ([]b func (workflowRequest *WorkflowRequest) GetEventTypeForWorkflowRequest() string { switch workflowRequest.Type { - case bean.CI_WORKFLOW_PIPELINE_TYPE, bean.JOB_WORKFLOW_PIPELINE_TYPE: + case bean.CI_WORKFLOW_PIPELINE_TYPE: return bean.CiStage + case bean.JOB_WORKFLOW_PIPELINE_TYPE: + return bean.JobStage case bean.CD_WORKFLOW_PIPELINE_TYPE: return bean.CdStage default: @@ -222,7 +224,7 @@ func (workflowRequest *WorkflowRequest) GetEventTypeForWorkflowRequest() string func (workflowRequest *WorkflowRequest) GetWorkflowTypeForWorkflowRequest() string { switch workflowRequest.Type { - case bean.CI_WORKFLOW_PIPELINE_TYPE, bean.JOB_WORKFLOW_PIPELINE_TYPE: + case bean.CI_WORKFLOW_PIPELINE_TYPE, bean.JOB_WORKFLOW_PIPELINE_TYPE: //TODO: separate job as did in eventType, will need changes in wf template for this return bean.CI_WORKFLOW_NAME case bean.CD_WORKFLOW_PIPELINE_TYPE: return bean.CD_WORKFLOW_NAME @@ -231,6 +233,17 @@ func (workflowRequest *WorkflowRequest) GetWorkflowTypeForWorkflowRequest() stri } } +func (workflowRequest *WorkflowRequest) GetPipelineTypeForGlobalCMCS() string { + switch workflowRequest.Type { + case bean.CI_WORKFLOW_PIPELINE_TYPE, bean.JOB_WORKFLOW_PIPELINE_TYPE: + return bean.CiStage //although for job, event type is changed to job from ci but for backward compatibility still sending ci for global cm/cs + case bean.CD_WORKFLOW_PIPELINE_TYPE: + return bean.CdStage + default: + return "" + } +} + func (workflowRequest *WorkflowRequest) getContainerEnvVariables(config *CiCdConfig, workflowJson []byte) (containerEnvVariables []v1.EnvVar) { containerEnvVariables = []v1.EnvVar{{Name: bean.IMAGE_SCANNER_ENDPOINT, Value: config.ImageScannerEndpoint}, {Name: "NATS_SERVER_HOST", Value: config.NatsServerHost}} eventEnv := v1.EnvVar{Name: "CI_CD_EVENT", Value: string(workflowJson)} diff --git a/scripts/sql/232_trivy_alter.down.sql b/scripts/sql/232_trivy_alter.down.sql new file mode 100644 index 0000000000..fb7a70ee30 --- /dev/null +++ b/scripts/sql/232_trivy_alter.down.sql @@ -0,0 +1,15 @@ +UPDATE scan_tool_metadata +SET result_descriptor_template = '[{{$size1:= len .Results}}{{range $i1, $v1 := .Results}}{{ if $v1.Vulnerabilities}}{{$size2:= len $v1.Vulnerabilities}}{{range $i2, $v2 := $v1.Vulnerabilities}}{{if and (eq $i1 (add $size1 -1)) (eq $i2 (add $size2 -1)) }} +{ +"package": "{{$v2.PkgName}}", +"packageVersion": "{{$v2.InstalledVersion}}", +"fixedInVersion": "{{$v2.FixedVersion}}", +"severity": "{{$v2.Severity}}", +"name": "{{$v2.VulnerabilityID}}" +}{{else}}{ +"package": "{{$v2.PkgName}}", +"packageVersion": "{{$v2.InstalledVersion}}", +"fixedInVersion": "{{$v2.FixedVersion}}", +"severity": "{{$v2.Severity}}", +"name": "{{$v2.VulnerabilityID}}" +},{{end}}{{end}}{{end}}{{end}}]' where name = 'TRIVY' and version ='V1'; diff --git a/scripts/sql/232_trivy_alter.up.sql b/scripts/sql/232_trivy_alter.up.sql new file mode 100644 index 0000000000..469d1f2467 --- /dev/null +++ b/scripts/sql/232_trivy_alter.up.sql @@ -0,0 +1,12 @@ +UPDATE scan_tool_metadata +SET result_descriptor_template = '[ + { + "pathToVulnerabilitiesArray": "Results.#.Vulnerabilities", + "name": "VulnerabilityID", + "package": "PkgName", + "packageVersion": "InstalledVersion", + "fixedInVersion": "FixedVersion", + "severity": "Severity" + } +]' where name = 'TRIVY' and version ='V1'; + diff --git a/specs/global_cm_cs.yaml b/specs/global_cm_cs.yaml index cb6cfa2abd..16cb81306a 100644 --- a/specs/global_cm_cs.yaml +++ b/specs/global_cm_cs.yaml @@ -60,6 +60,13 @@ 
components: type: object additionalProperties: type: string + secretIngestionFor: + type: string + description: field for defining at where this config is to be ingested. If not set, "CI/CD" will be used as default. + enum: + - "CI" + - "CD" + - "CI/CD" Error: required: - code From 346786340dfcaaf6503af8fd0ebf7657d0e72079 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Wed, 27 Mar 2024 22:52:59 +0530 Subject: [PATCH 15/29] fix: helm deployements stucked in queued for devtron apps (#4842) --- pkg/eventProcessor/in/WorkflowEventProcessorService.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/eventProcessor/in/WorkflowEventProcessorService.go b/pkg/eventProcessor/in/WorkflowEventProcessorService.go index b997b5ca54..42ce401233 100644 --- a/pkg/eventProcessor/in/WorkflowEventProcessorService.go +++ b/pkg/eventProcessor/in/WorkflowEventProcessorService.go @@ -680,7 +680,7 @@ func (impl *WorkflowEventProcessorImpl) handleConcurrentOrInvalidRequest(overrid if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("err on fetching pipeline, handleConcurrentOrInvalidRequest", "err", err, "pipelineId", pipelineId) return toSkipProcess, err - } else if err != pg.ErrNoRows || pipelineObj == nil || pipelineObj.Id == 0 { + } else if err == pg.ErrNoRows || pipelineObj == nil || pipelineObj.Id == 0 { impl.logger.Warnw("invalid request received pipeline not active, handleConcurrentOrInvalidRequest", "err", err, "pipelineId", pipelineId) toSkipProcess = true return toSkipProcess, err From a37e3df971231dc3be3e68ead6d68603cd7d9b98 Mon Sep 17 00:00:00 2001 From: Prakash Date: Thu, 28 Mar 2024 10:48:04 +0530 Subject: [PATCH 16/29] ns not found in case ips is being injected in cluster (#4844) --- pkg/dockerRegistry/DockerRegistryIpsConfigService.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/dockerRegistry/DockerRegistryIpsConfigService.go b/pkg/dockerRegistry/DockerRegistryIpsConfigService.go index 0a5e626c48..07ff9026e6 100644 --- a/pkg/dockerRegistry/DockerRegistryIpsConfigService.go +++ b/pkg/dockerRegistry/DockerRegistryIpsConfigService.go @@ -23,6 +23,7 @@ import ( repository3 "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + util2 "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/cluster" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/go-pg/pg" @@ -30,6 +31,7 @@ import ( v1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" "net/http" + "strconv" "strings" ) @@ -274,6 +276,10 @@ func (impl DockerRegistryIpsConfigServiceImpl) createOrUpdateDockerRegistryImage ipsData := BuildIpsData(registryURL, username, password, email) _, err = impl.k8sUtil.CreateSecret(namespace, ipsData, ipsName, v1.SecretTypeDockerConfigJson, k8sClient, nil, nil) if err != nil { + if statusError, ok = err.(*k8sErrors.StatusError); ok { + errorCode := int(statusError.ErrStatus.Code) + err = &util2.ApiError{Code: strconv.Itoa(errorCode), HttpStatusCode: errorCode, UserMessage: statusError.Error(), InternalMessage: statusError.Error()} + } impl.logger.Errorw("error in creating secret", "clusterId", clusterId, "namespace", namespace, "ipsName", ipsName, "error", err) return err } From ab5233fe150e5a4ba92cb96241969e9d4244ebdf Mon Sep 17 00:00:00 2001 From: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Date: Thu, 28 Mar 2024 13:36:50 +0530 Subject: 
[PATCH 17/29] misc: Pagerduty issue template change (#4796) * misc: Pagerduty issue template change Pagerduty issue template change to seek more information for validating the criticality of the issue. * Update pager-duty.yaml * Update pager-duty.yaml * Update pager-duty.yaml * feat: Adding script to calculate score for pagerduty issues (#4832) * feat: active inactive user phase 3 (#4649) * listing user and group changes * default value chnage * group listing optimisation * wire_gen * order by in group * default values * discard * updated on * case insensitive * script number change * specs * review chnages * sql update * id for user * script number chnage * review comments-1 * review comments * review comments * rest handler remove methods * validation for delete * delete user handling * review comments * review comments * bulk delete user and permission group * legacy code fix from loop in query to bulk query * export csv filters support * Revert "export csv filters support" This reverts commit 4448c9e0bcf69e7f583ed75d5931869aa0fd39dc. * export csv filters * bulk delete support with filters * validation * method break * refactor the method * condition * open api specs * specs * refactoring filters in bulk delete rolegroup * comments * comments * script * group listing * review comments * review comments * review comments * review comments * user delete self-review * error handling * renaming helpers * commets * checks and renaming * last login order * script number change * rolegroup migration * name change * backward compatibility handling * self review name change * chart-group -manager-fix * userrolegroups * change chnage operation * specs update * groups * find by componet id * job project id * sql script chnage * script number change * fix: extra check added for mono-repo migraiton (#4764) * extra check added for mono-repo migraiton * comparison of repoNameWithoutPrefix and appstore name for mono repo condition * refactor * remove prefix * fix * remove-gitops-prefix-oss * refactor * refactor * refactor * added request method in audit logger (#4817) * fix: rolefilters correction with all applications and particular application selected. 
(#4820) * merging issue resolved * all namespace, cluster, kind remove as it is used in key * removing unnecessary code * fix: 5xx 4.0 iter (#4620) * bugs revert * 4th iter code changes * delete dag exec * initial poc (ongoing) of handling grpc errors * grpc error handling at install helm chart and template services * refactoring * refactor * code review changes with some refactoring * removed unused files * refactor * chore: refactoring v4 (#4775) * removed registerInArgo multiple impls * extracted app metrics code * migrated envLevel app metrics code to new service * chore: Removed unused jira and migration integration (#4498) * removed unsued jira integration * removed test-suite-code * db migration conf removal * chore: removed unused injection * chore: removed dead code * added: migration script --------- Co-authored-by: Ash-exp * chore: App store dead code cleanup and restructuring (#4497) * moved chart-group in seperate code * removed unused dependency * removed dead code * extracted resource tree * moved notes * resource movement * removed unused code * removed unused dependency * commit methods * extracted status update * chore: clean up unused dead code * updated: EA mode docker file * updated: migration number --------- Co-authored-by: Ash-exp * chart ref refactoring * removed infra metrics db calls * moved app metrics repositories from /internal to /pkg * moved: const and types to bean * removed: unused const * review comments * migrated some methods from chartService to chartRefService * added dt validation service interface * minor refactoring * moved validation method - 1 * wip * removed redundant appMetrics req obj * moved app metrics bindings to wireset * removed multiple dead code * remove redundant dependency * moved ChartGroup router and rest handler to respective folder * stage 1 * gitOps refactoring * moved gitClient code to a common wrapper service * chore: AppStoreDeployment Install flow refactoring * review changes * wip * fix for unsupported charts * refactoring: App Store deployment services * minor cleanup * renamed remote package to git * renamed gitOpsRemoteOpService If and impl * migrated usages of gitService to gitOperationService * shifted git service and all gitOps clients to pkg * gitops repository usages refactor * refactored gitOpsRepository usages * gitlab client creation refactoring * renamed util/ChartService * reverted renaming changes * reverted renaming changes * reverted renaming changes * wip * wip * removed typo * changes * changes * extracted trigger cd, nats subscriptions from wfDAGExec service * removed gitOpsRepoName fetch logic duplicacy * minor change for cd trigger method * removed redundant imports * extracted deployment bulk trigger publish event logic from workflowDag * extracted manifest creation code from WorkflowDagExecutor * moved WorkflowStatusUpdateHandler * removed old refactored code * wip - extracted k8s op method from workflowDAG part 1 * extracted artifact logic from workflowDAG * extracted artifact logic from workflowDAG * refactoring * replaced slices -> k8s.io/utils/strings/slices import * replaced slices -> k8s.io/utils/strings/slices import * fix prod bug * renamed PrePostStageTriggerService -> preStageTriggerService * fix for rollback * wip * refactoring pre & post stage service * updated PreCdTriggerService * migrated AsyncTrigger consumer to eventProcessor * review comments * removed whitespaces * migrated ci material topic to processor service * migrated argo app status subsciption to common processor pkg * 
migrated argo type pipeline publish and process to common pkg * migrated appstore bulk deploy topic * migrated cd bulk deploy topic * migrated appstore helm install status topic * migrated git webhook event publish * minor changes in manifest creation service * minor changes in cd trigger service * minor changes in cd trigger service * wip * minor change in async helm install req handling * wip * updated common lib version(synced with main) * safety check for concurrency in pipeline delete and asyn trigger * updated common-lib version * updated common-lib version to main 5807b130153800727ace993e98b24cb27b8fc1fa --------- Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> Co-authored-by: Ash-exp Co-authored-by: nishant * fix: hide ldap creds in in get req (#4788) * hide ldap creds in in get req * removed unnecessary code * refactor * revert * refactor * pointer binding (#4826) * Adding script to calculate score for pagerduty issues * Update github_pagerduty_issue_score_calculation.py * feat: Branch Divergence Checker Plugin (#4806) * Branch Divergence Checker Plugin * Delete scripts/sql/229_GitHub_branch_divergence_checker_v1.0.down.sql * Delete assets/GitHubBranchDivergenceCheckerlogo.png * Adding assets * Changing asset name * Removing asset * Update 229_github_branch_divergence_checker.up.sql * feat: Added Apply job in k8s plugin (#4828) * feat: Added Devtron CI Trigger Plugin v1.0.0 * feat: Added Apply JOB in k8s Plugin * modified structure * Added error handelling * Removed CI trigger plugin * Migration number changed * logo changed * feat: Information of Linked CI Pipelines on Parent CI Pipeline (#4786) * Refactor ArgoUserService.go by adding a TODO * wip * updated API spec * Update repo and service * Add logging of errors * modified API spec * added total count in query * updated service and rest handler layer * Update router and resthandler * set default req params at rest handler * moved generics to global util * Update resthandler * added tracing for new queries * fixed undefined ctx * Update handler's rbac * update error logs * Update cipipeline repo * added: comments for changes * Change handler * fixed typo and imports * fixed adapter and constants * updated test file errors and mock files * updated API specs * fixed API specs end points * fixed: query errors * fixed: linked cd condition * fixed: typo * fixed: search * fixed nil check for runner query * fixed: append in adapter * fix searchkey to lowercase * Fix duplicated env names * fix pass env array as empty * Fix error logs * fix error * fixed import --------- Co-authored-by: komalreddy3 * Update github_pagerduty_issue_score_calculation.py * Update github_pagerduty_score_calculation.yml * Update github_pagerduty_score_calculation.yml * Update github_pagerduty_issue_score_calculation.py * Update github_pagerduty_issue_score_calculation.py * Update github_pagerduty_score_calculation.yml * feat:Github Pull Request Closer (#4833) * PR plugin script * Update 230_Github_Pull_Request_Closer.up.sql * Update 230_Github_Pull_Request_Closer.up.sql * Update 230_Github_Pull_Request_Closer.up.sql * Rename 230_Github_Pull_Request_Closer.down.sql to 231_Github_Pull_Request_Closer.down.sql * Rename 230_Github_Pull_Request_Closer.up.sql to 231_Github_Pull_Request_Closer.up.sql * Rename GithubReleasePR.png to GithubPullRequest-Plugin-logo.png * Update 231_Github_Pull_Request_Closer.up.sql --------- Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * chore: Image scanning refactoring (#4802) * added 
sql * updated event type for job in workflow request * added comments, handling for global cm/cs * fixed json string global cmcs * updated global cm/cs spec * wip * updated sql script number * updated sql script number * Update github_pagerduty_score_calculation.yml * Update github_pagerduty_issue_score_calculation.py * Update github_pagerduty_score_calculation.yml * Update github_pagerduty_issue_score_calculation.py * fix: helm deployements stucked in queued for devtron apps (#4842) * ns not found in case ips is being injected in cluster (#4844) * Delete .github/github_pagerduty_issue_score_calculation.py --------- Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: Raunit Verma <155707586+raunit-verma@users.noreply.github.com> Co-authored-by: kartik-579 <84493919+kartik-579@users.noreply.github.com> Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> Co-authored-by: Ash-exp Co-authored-by: nishant Co-authored-by: Kiran <155609672+kirandevtn@users.noreply.github.com> Co-authored-by: komalreddy3 Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * Adding scritps to calculate score (#4848) * Adding scritps to calculate score * Update github_pagerduty_score_calculation.yml --------- Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> --------- Co-authored-by: Yashasvi17 <155513200+YashasviDevtron@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: Raunit Verma <155707586+raunit-verma@users.noreply.github.com> Co-authored-by: kartik-579 <84493919+kartik-579@users.noreply.github.com> Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> Co-authored-by: Ash-exp Co-authored-by: nishant Co-authored-by: Kiran <155609672+kirandevtn@users.noreply.github.com> Co-authored-by: komalreddy3 --- .github/ISSUE_TEMPLATE/pager-duty.yaml | 73 +++++++-- .../github_pagerduty_score_calculation.yml | 27 ++++ ...ithub_pagerduty_issue_score_calculation.py | 139 ++++++++++++++++++ 3 files changed, 224 insertions(+), 15 deletions(-) create mode 100644 .github/workflows/github_pagerduty_score_calculation.yml create mode 100644 scripts/utilities/github_pagerduty_issue_score_calculation.py diff --git a/.github/ISSUE_TEMPLATE/pager-duty.yaml b/.github/ISSUE_TEMPLATE/pager-duty.yaml index 44a6d01ced..4277e632bd 100644 --- a/.github/ISSUE_TEMPLATE/pager-duty.yaml +++ b/.github/ISSUE_TEMPLATE/pager-duty.yaml @@ -18,15 +18,66 @@ body: description: "A clear and concise description of what the bug is." placeholder: "It bugs out when ..." - type: dropdown - id: criticality + id: affected-areas attributes: - label: "Criticality" - description: "How critical is the issue? Please include the impact in the " + label: "Affected areas" + description: "What areas of Devtron are impacted by the issue?" options: - - P0 - Critical/Blocking - - P1 - High - - P2 - Medium - - P3 - Low + - Devtron dashboard completely down + - Login issues + - RBAC Issues + - CI + - CD + - App creation + - Deployment from Chart store + - Security features + - CI/CD Plugins + - Other CRITICAL functionality + - Other NON-CRITICAL functionality + - type: dropdown + id: additional-affected-areas + attributes: + label: "Additional affected areas" + description: "Are there any additional affected areas?" 
+ options: + - Devtron dashboard completely down + - Login issues + - RBAC Issues + - CI + - CD + - App creation + - Deployment from Chart store + - Security features + - CI/CD Plugins + - Other CRITICAL functionality + - Other NON-CRITICAL functionality + - type: dropdown + id: prod-environment + attributes: + label: "Prod/Non-prod environments?" + description: "Is the issue affecting Prod environments?" + options: + - Prod + - Non-prod + - type: dropdown + id: user-unblocked + attributes: + label: "Is User unblocked?" + description: "Is the User unblocked?" + options: + - Yes + - No + - type: dropdown + id: user-unblocked-reason + attributes: + label: "How was the user un-blocked?" + description: "If the user was unblocked. How was the user un-blocked?" + options: + - TEMPORARILY - By disabling a CRITICAL functionality + - TEMPORARILY - By disabling a NON-CRITICAL functionality + - TEMPORARILY - By doing some changes from the backend/DB + - PERMANENTLY - By giving a workaround (From outside Devtron) + - PERMANENTLY - By giving a workaround (Within Devtron) - type: textarea id: impact validations: @@ -91,14 +142,6 @@ body: - Something Else validations: required: true - - type: textarea - id: environment - validations: - required: false - attributes: - label: "🧱 Your Environment" - description: "Is your environment customized in any way? Provide your Browser version as well." - placeholder: "I use XYZ for ..." - type: textarea id: solution validations: diff --git a/.github/workflows/github_pagerduty_score_calculation.yml b/.github/workflows/github_pagerduty_score_calculation.yml new file mode 100644 index 0000000000..7e88405c77 --- /dev/null +++ b/.github/workflows/github_pagerduty_score_calculation.yml @@ -0,0 +1,27 @@ +name: Issue Created +on: + issues: + types: [opened] + +jobs: + extract-issue-body: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Check if pager-duty template is used + if: ${{ contains(github.event.issue.labels.*.name, 'pager-duty') && contains(github.event.issue.labels.*.name, 'bug') }} + run: | + echo "Issue was created using pager-duty template" + python3 scripts/utilities/github_pagerduty_issue_score_calculation.py + env: + ISSUE_NUMBER: ${{ github.event.issue.number }} + ISSUE_BODY: ${{ github.event.issue.body }} + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + PAGERDUTY_SCORE_THRESHOLD: ${{ vars.PAGERDUTY_SCORE_THRESHOLD }} diff --git a/scripts/utilities/github_pagerduty_issue_score_calculation.py b/scripts/utilities/github_pagerduty_issue_score_calculation.py new file mode 100644 index 0000000000..3a32543573 --- /dev/null +++ b/scripts/utilities/github_pagerduty_issue_score_calculation.py @@ -0,0 +1,139 @@ +import os +import sys +import re +import subprocess + +# Dictionaries to store different options +affected_areas = { + "Devtron dashboard completely down": 100, + "Login issues": 50, + "RBAC Issues": 40, + "CI": 50, + "CD": 50, + "App creation": 30, + "Deployment from Chart store": 40, + "Security features": 50, + "CI/CD Plugins": 30, + "Other CRITICAL functionality": 30, + "Other NON-CRITICAL functionality": 20, + "None": 0 +} + +additional_affected_areas = { + "Devtron dashboard completely down": 100, + "Login issues": 50, + "RBAC Issues": 40, + "CI": 50, + "CD": 50, + "App creation": 30, + "Deployment from Chart store": 40, + "Security features": 50, + "CI/CD Plugins": 30, + "Other CRITICAL functionality": 30, + "Other 
NON-CRITICAL functionality": 20, + "None": 0 +} + +prod_environment = { + "Prod": 2, + "Non-prod": 1, + "None": 1 +} + +user_unblocked = { + "Yes": 1, + "No": 2, + "None": 1 +} + +user_unblocked_reason = { + "TEMPORARILY - By disabling a CRITICAL functionality": 3, + "TEMPORARILY - By disabling a NON-CRITICAL functionality": 1.2, + "TEMPORARILY - By doing some changes from the backend/DB": 1, + "PERMANENTLY - By giving a workaround (From outside Devtron)": 2, + "PERMANENTLY - By giving a workaround (Within Devtron)": 1, + "None": 1 +} +# Function to extract and process information from the issue body +def process_issue_body(issue_body): + # Regular expressions to extract specific sections from the issue body + affected_areas_pattern = r'###\s*Affected\s*areas\s*\n\n(.*?)\n\n###' + additional_affected_areas_pattern = r'###\s*Additional\s*affected\s*areas\s*\n\n(.*?)\n\n###' + prod_non_prod_pattern = r'###\s*Prod/Non-prod\s*environments\?\s*\n\n(.*?)\n\n###' + user_unblocked_pattern = r'###\s*Is\s*User\s*unblocked\?\s*\n\n(.*?)\n\n###' + user_unblocked_reason_pattern = r'###\s*How\s*was\s*the\s*user\s*un-blocked\?\s*\n\n(.*?)\n\n###' + + # Matching patterns in the issue body + affected_areas_match = re.search(affected_areas_pattern, issue_body) + additional_affected_areas_match = re.search(additional_affected_areas_pattern, issue_body) + prod_non_prod_match = re.search(prod_non_prod_pattern, issue_body) + user_unblocked_match = re.search(user_unblocked_pattern, issue_body) + user_unblocked_reason_match = re.search(user_unblocked_reason_pattern, issue_body) + + # Extracting values from matches or setting default value to "None" if match not found + affected_area_value = affected_areas_match.group(1).strip() if affected_areas_match else "None" + additional_affected_area_value = additional_affected_areas_match.group(1).strip() if additional_affected_areas_match else "None" + prod_non_prod_value = prod_non_prod_match.group(1).strip() if prod_non_prod_match else "None" + user_unblocked_value = user_unblocked_match.group(1).strip() if user_unblocked_match else "None" + user_unblocked_reason_value = user_unblocked_reason_match.group(1).strip() if user_unblocked_reason_match else "None" + + # Retrieving values from dictionaries + affected_areas_score = affected_areas.get(affected_area_value, 0) + additional_affected_areas_score = additional_affected_areas.get(additional_affected_area_value, 0) + prod_non_prod_score = prod_environment.get(prod_non_prod_value, 1) + user_unblocked_score = user_unblocked.get(user_unblocked_value, 1) + user_unblocked_reason_score = user_unblocked_reason.get(user_unblocked_reason_value, 1) + + print("Affected areas:", affected_area_value) + print("Additional affected areas:", additional_affected_area_value) + print("Prod/Non-prod environments?:", prod_non_prod_value) + print("Is User unblocked?:", user_unblocked_value) + print("How was the user un-blocked?:", user_unblocked_reason_value) + + # Checking for required values and skipping execution of script, if not found + if affected_areas_score == 0 or prod_non_prod_score == 0 or user_unblocked_score == 0: + print("One or more required values are missing. 
Exiting...") + sys.exit(0) + + if user_unblocked_reason_score == 0: + user_unblocked_reason_score = 1 + + # Adding 'urgent' label to the issue if user_unblocked_reason is 'TEMPORARILY - By disabling a CRITICAL functionality' or affected_areas is 'Devtron dashboard completely down' + if user_unblocked_reason_score == 3 or affected_areas_score == 100: + try: + + result = subprocess.run(['gh', 'issue', 'edit', str(issue_number), '--add-label', 'urgent'], capture_output=True, check=True, text=True) + print("urgent label added to issue", issue_number) + except subprocess.CalledProcessError as e: + print(e.stderr) + #calculating final score + final_score = affected_areas_score + additional_affected_areas_score * prod_non_prod_score * user_unblocked_score * user_unblocked_reason_score + print("Final Score:", final_score) + + # Commenting the final score in the issue + comment = f"Final Score: {final_score}" + try: + result1 = subprocess.run(['gh', 'issue', 'comment', str(issue_number), '--body', comment], capture_output=True, check=True, text=True) + print("Final score commented on issue", issue_number) + except subprocess.CalledProcessError as e: + print(e.stderr) + return final_score + +token = os.environ.get('GITHUB_TOKEN') +subprocess.run(['gh', 'auth', 'login', '--with-token'], input=token, text=True, capture_output=True) + +# Retrieving environment variables +issue_body = os.environ.get('ISSUE_BODY') +issue_number = os.environ.get('ISSUE_NUMBER') +pagerduty_score_threshold = os.environ.get('PAGERDUTY_SCORE_THRESHOLD') + +final_score = process_issue_body(issue_body) + + +# Removing 'pager-duty' label from issue if final score is below the threshold +if final_score <= int(pagerduty_score_threshold): + try: + result = subprocess.run(['gh', 'issue', 'edit', str(issue_number), '--remove-label', 'pager-duty']) + print("pager-duty label removed from issue", issue_number) + except subprocess.CalledProcessError as e: + print(e) From 126b69710abd6691ee624730aaf4485faf3c1454 Mon Sep 17 00:00:00 2001 From: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Date: Thu, 28 Mar 2024 13:46:04 +0530 Subject: [PATCH 18/29] Update pager-duty.yaml (#4850) --- .github/ISSUE_TEMPLATE/pager-duty.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/pager-duty.yaml b/.github/ISSUE_TEMPLATE/pager-duty.yaml index 4277e632bd..cfbf54a1f9 100644 --- a/.github/ISSUE_TEMPLATE/pager-duty.yaml +++ b/.github/ISSUE_TEMPLATE/pager-duty.yaml @@ -65,8 +65,8 @@ body: label: "Is User unblocked?" description: "Is the User unblocked?" 
options: - - Yes - - No + - 'Yes' + - 'No' - type: dropdown id: user-unblocked-reason attributes: From 698e4ab99841a4e5fc5ea22974c17b9782bcd5d3 Mon Sep 17 00:00:00 2001 From: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Date: Thu, 28 Mar 2024 14:27:25 +0530 Subject: [PATCH 19/29] Update github_pagerduty_score_calculation.yml (#4853) --- .github/workflows/github_pagerduty_score_calculation.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/github_pagerduty_score_calculation.yml b/.github/workflows/github_pagerduty_score_calculation.yml index 7e88405c77..bb3bdbfefe 100644 --- a/.github/workflows/github_pagerduty_score_calculation.yml +++ b/.github/workflows/github_pagerduty_score_calculation.yml @@ -23,5 +23,5 @@ jobs: env: ISSUE_NUMBER: ${{ github.event.issue.number }} ISSUE_BODY: ${{ github.event.issue.body }} - GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + GITHUB_TOKEN: ${{ github.token }} PAGERDUTY_SCORE_THRESHOLD: ${{ vars.PAGERDUTY_SCORE_THRESHOLD }} From ed122786a623116b0c6b3c15c5aa1d524c0b8585 Mon Sep 17 00:00:00 2001 From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:04:44 +0530 Subject: [PATCH 20/29] Added Field Path in Deployment Template (#4852) --- .../deployment-template/deployment.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/user-guide/creating-application/deployment-template/deployment.md b/docs/user-guide/creating-application/deployment-template/deployment.md index 587ea0d5b1..a8e1c46446 100644 --- a/docs/user-guide/creating-application/deployment-template/deployment.md +++ b/docs/user-guide/creating-application/deployment-template/deployment.md @@ -62,6 +62,14 @@ EnvVariables: [] ``` To set environment variables for the containers that run in the Pod. +### EnvVariablesFromFieldPath +```yaml +EnvVariablesFromFieldPath: +- name: ENV_NAME + fieldPath: status.podIP (example) +``` +To set environment variables for the containers and fetching their values from pod-level fields. + ### Liveness Probe If this check fails, kubernetes restarts the pod. This should return error code in case of non-recoverable error. 
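The `EnvVariablesFromFieldPath` entry documented above rides on the Kubernetes downward API. As a rough illustration of the shape such an entry ultimately takes on a container's env (a minimal sketch using the upstream `k8s.io/api/core/v1` types; the helper and variable names are illustrative and not taken from the Devtron chart templates):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// buildFieldPathEnv sketches the downward-API env var that an entry like
//   EnvVariablesFromFieldPath:
//   - name: POD_IP
//     fieldPath: status.podIP
// is expected to correspond to on the rendered Pod spec.
func buildFieldPathEnv(name, fieldPath string) corev1.EnvVar {
	return corev1.EnvVar{
		Name: name,
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{
				// FieldPath is resolved by the kubelet at container start.
				FieldPath: fieldPath,
			},
		},
	}
}

func main() {
	env := buildFieldPathEnv("POD_IP", "status.podIP")
	fmt.Printf("%s <- fieldRef %s\n", env.Name, env.ValueFrom.FieldRef.FieldPath)
}
```

Besides `status.podIP`, the downward API also accepts field paths such as `metadata.name`, `metadata.namespace`, and `spec.nodeName`.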
From 4874646eb83671f2b6898163ab6e58ef43faf70c Mon Sep 17 00:00:00 2001 From: Yashasvi17 <155513200+YashasviDevtron@users.noreply.github.com> Date: Mon, 1 Apr 2024 11:44:47 +0530 Subject: [PATCH 21/29] misc: Refactoring Pagerduty Issue Calculator script (#4856) * Modifying pager duty python script * Updated CODEOWNERS --------- Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> --- .github/CODEOWNERS | 1 + .../github_pagerduty_issue_score_calculation.py | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6135b5ad31..0dc15a5754 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,6 +11,7 @@ scripts/devtron-reference-helm-charts @prakarsh-dt @pawan-mehta-dt @nishant-d #Migration scripts scripts/sql @prakarsh-dt @vikramdevtron @kripanshdevtron @nishant-d +scripts/utilities @prakarsh-dt @nishant-d @pawan-mehta-dt #Github Specific .github/ @prakarsh-dt @nishant-d @pawan-mehta-dt diff --git a/scripts/utilities/github_pagerduty_issue_score_calculation.py b/scripts/utilities/github_pagerduty_issue_score_calculation.py index 3a32543573..087051908a 100644 --- a/scripts/utilities/github_pagerduty_issue_score_calculation.py +++ b/scripts/utilities/github_pagerduty_issue_score_calculation.py @@ -37,13 +37,13 @@ prod_environment = { "Prod": 2, "Non-prod": 1, - "None": 1 + "None": 0 } user_unblocked = { "Yes": 1, "No": 2, - "None": 1 + "None": 0 } user_unblocked_reason = { @@ -52,7 +52,7 @@ "TEMPORARILY - By doing some changes from the backend/DB": 1, "PERMANENTLY - By giving a workaround (From outside Devtron)": 2, "PERMANENTLY - By giving a workaround (Within Devtron)": 1, - "None": 1 + "None": 0 } # Function to extract and process information from the issue body def process_issue_body(issue_body): @@ -80,9 +80,9 @@ def process_issue_body(issue_body): # Retrieving values from dictionaries affected_areas_score = affected_areas.get(affected_area_value, 0) additional_affected_areas_score = additional_affected_areas.get(additional_affected_area_value, 0) - prod_non_prod_score = prod_environment.get(prod_non_prod_value, 1) - user_unblocked_score = user_unblocked.get(user_unblocked_value, 1) - user_unblocked_reason_score = user_unblocked_reason.get(user_unblocked_reason_value, 1) + prod_non_prod_score = prod_environment.get(prod_non_prod_value, 0) + user_unblocked_score = user_unblocked.get(user_unblocked_value, 0) + user_unblocked_reason_score = user_unblocked_reason.get(user_unblocked_reason_value, 0) print("Affected areas:", affected_area_value) print("Additional affected areas:", additional_affected_area_value) @@ -107,7 +107,7 @@ def process_issue_body(issue_body): except subprocess.CalledProcessError as e: print(e.stderr) #calculating final score - final_score = affected_areas_score + additional_affected_areas_score * prod_non_prod_score * user_unblocked_score * user_unblocked_reason_score + final_score = (affected_areas_score + additional_affected_areas_score)* prod_non_prod_score * user_unblocked_score * user_unblocked_reason_score print("Final Score:", final_score) # Commenting the final score in the issue From 8ceed7299c134ac41b0a5656e0e6dea98e202b52 Mon Sep 17 00:00:00 2001 From: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:20:42 +0530 Subject: [PATCH 22/29] feat: Add support for git hash in the app and app group overview section _revised (#4836) * Revised the api for handelling the overview git commits part using CiArtifact table only * Removed 
unnecessary comments * optimized the loop and intialization * used maps instead of slices * removed unused comments * modified the GetUniqueArtifactIds * Removed error from util function * renamed the function as generateArtifactIDCommitMap --- api/bean/AppView.go | 35 +++--- .../sql/repository/AppListingRepository.go | 7 +- pkg/app/AppListingService.go | 100 +++++++++++++++++- wire_gen.go | 2 +- 4 files changed, 123 insertions(+), 21 deletions(-) diff --git a/api/bean/AppView.go b/api/bean/AppView.go index ba54f8494c..e718d3bc96 100644 --- a/api/bean/AppView.go +++ b/api/bean/AppView.go @@ -138,6 +138,7 @@ type AppEnvironmentContainer struct { TeamName string `json:"teamName"` Description string `json:"description" validate:"max=40"` TotalCount int `json:"-"` + Commits []string `json:"commits"` } type DeploymentDetailContainer struct { @@ -197,22 +198,24 @@ type Notes struct { } type Environment struct { - AppStatus string `json:"appStatus"` //this is not the status of environment , this make sense with a specific app only - EnvironmentId int `json:"environmentId"` - EnvironmentName string `json:"environmentName"` - AppMetrics *bool `json:"appMetrics"` - InfraMetrics *bool `json:"infraMetrics"` - Prod bool `json:"prod"` - ChartRefId int `json:"chartRefId"` - LastDeployed string `json:"lastDeployed"` - LastDeployedBy string `json:"lastDeployedBy"` - LastDeployedImage string `json:"lastDeployedImage"` - DeploymentAppDeleteRequest bool `json:"deploymentAppDeleteRequest"` - Description string `json:"description" validate:"max=40"` - IsVirtualEnvironment bool `json:"isVirtualEnvironment"` - ClusterId int `json:"clusterId"` - PipelineId int `json:"pipelineId"` - LatestCdWorkflowRunnerId int `json:"latestCdWorkflowRunnerId,omitempty"` + AppStatus string `json:"appStatus"` //this is not the status of environment , this make sense with a specific app only + EnvironmentId int `json:"environmentId"` + EnvironmentName string `json:"environmentName"` + AppMetrics *bool `json:"appMetrics"` + InfraMetrics *bool `json:"infraMetrics"` + Prod bool `json:"prod"` + ChartRefId int `json:"chartRefId"` + LastDeployed string `json:"lastDeployed"` + LastDeployedBy string `json:"lastDeployedBy"` + LastDeployedImage string `json:"lastDeployedImage"` + DeploymentAppDeleteRequest bool `json:"deploymentAppDeleteRequest"` + Description string `json:"description" validate:"max=40"` + IsVirtualEnvironment bool `json:"isVirtualEnvironment"` + ClusterId int `json:"clusterId"` + PipelineId int `json:"pipelineId"` + LatestCdWorkflowRunnerId int `json:"latestCdWorkflowRunnerId,omitempty"` + CiArtifactId int `json:"ciArtifactId"` + Commits []string `json:"commits"` } type InstanceDetail struct { diff --git a/internal/sql/repository/AppListingRepository.go b/internal/sql/repository/AppListingRepository.go index 4de732409e..6b43d7bd8c 100644 --- a/internal/sql/repository/AppListingRepository.go +++ b/internal/sql/repository/AppListingRepository.go @@ -76,6 +76,7 @@ type AppNameTypeIdContainerDBResponse struct { } type LastDeployed struct { + CiArtifactId int `sql:"ci_artifact_id"` LastDeployedBy string `sql:"last_deployed_by"` LastDeployedImage string `sql:"last_deployed_image"` } @@ -157,7 +158,7 @@ func (impl AppListingRepositoryImpl) FetchOverviewAppsByEnvironment(envId, limit func (impl AppListingRepositoryImpl) FetchLastDeployedImage(appId, envId int) (*LastDeployed, error) { var lastDeployed []*LastDeployed // we are adding a case in the query to concatenate the string "(inactive)" to the users' email id when user is 
inactive - query := `select ca.image as last_deployed_image, + query := `select ca.id as ci_artifact_id,ca.image as last_deployed_image, case when u.active = false then u.email_id || ' (inactive)' else u.email_id @@ -604,8 +605,8 @@ func (impl AppListingRepositoryImpl) FetchOtherEnvironment(appId int) ([]*bean.E var otherEnvironments []*bean.Environment //TODO: remove infra metrics from query as it is not being used from here query := `select pcwr.pipeline_id, pcwr.last_deployed, pcwr.latest_cd_workflow_runner_id, pcwr.environment_id, pcwr.deployment_app_delete_request, - e.cluster_id, e.environment_name, e.default as prod, e.description, ca.image as last_deployed_image, - u.email_id as last_deployed_by, elam.app_metrics, elam.infra_metrics, ap.status as app_status + e.cluster_id, e.environment_name, e.default as prod, e.description, ca.image as last_deployed_image, + u.email_id as last_deployed_by, elam.app_metrics, elam.infra_metrics, ap.status as app_status,ca.id as ci_artifact_id from (select * from (select p.id as pipeline_id, p.app_id, cwr.started_on as last_deployed, cwr.triggered_by, cwr.id as latest_cd_workflow_runner_id, cw.ci_artifact_id, p.environment_id, p.deployment_app_delete_request, diff --git a/pkg/app/AppListingService.go b/pkg/app/AppListingService.go index f4b09f2e22..181e96ceed 100644 --- a/pkg/app/AppListingService.go +++ b/pkg/app/AppListingService.go @@ -143,6 +143,7 @@ type AppListingServiceImpl struct { dockerRegistryIpsConfigService dockerRegistry.DockerRegistryIpsConfigService userRepository userrepository.UserRepository deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService + ciArtifactRepository repository.CiArtifactRepository } func NewAppListingServiceImpl(Logger *zap.SugaredLogger, appListingRepository repository.AppListingRepository, @@ -153,7 +154,7 @@ func NewAppListingServiceImpl(Logger *zap.SugaredLogger, appListingRepository re argoUserService argo.ArgoUserService, envOverrideRepository chartConfig.EnvConfigOverrideRepository, chartRepository chartRepoRepository.ChartRepository, ciPipelineRepository pipelineConfig.CiPipelineRepository, dockerRegistryIpsConfigService dockerRegistry.DockerRegistryIpsConfigService, userRepository userrepository.UserRepository, - deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService) *AppListingServiceImpl { + deployedAppMetricsService deployedAppMetrics.DeployedAppMetricsService, ciArtifactRepository repository.CiArtifactRepository) *AppListingServiceImpl { serviceImpl := &AppListingServiceImpl{ Logger: Logger, appListingRepository: appListingRepository, @@ -172,6 +173,7 @@ func NewAppListingServiceImpl(Logger *zap.SugaredLogger, appListingRepository re dockerRegistryIpsConfigService: dockerRegistryIpsConfigService, userRepository: userRepository, deployedAppMetricsService: deployedAppMetricsService, + ciArtifactRepository: ciArtifactRepository, } return serviceImpl } @@ -234,6 +236,8 @@ func (impl AppListingServiceImpl) FetchOverviewAppsByEnvironment(envId, limit, o impl.Logger.Errorw("failed to fetch environment containers", "err", err, "envId", envId) return resp, err } + + artifactIds := make([]int, 0) for _, envContainer := range envContainers { lastDeployed, err := impl.appListingRepository.FetchLastDeployedImage(envContainer.AppId, envId) if err != nil { @@ -243,12 +247,44 @@ func (impl AppListingServiceImpl) FetchOverviewAppsByEnvironment(envId, limit, o if lastDeployed != nil { envContainer.LastDeployedImage = lastDeployed.LastDeployedImage envContainer.LastDeployedBy = 
lastDeployed.LastDeployedBy + envContainer.CiArtifactId = lastDeployed.CiArtifactId + artifactIds = append(artifactIds, lastDeployed.CiArtifactId) + } + } + uniqueArtifacts := getUniqueArtifacts(artifactIds) + + artifactWithGitCommit, err := impl.generateArtifactIDCommitMap(uniqueArtifacts) + if err != nil { + impl.Logger.Errorw("failed to fetch Artifacts to git Triggers ", "envId", envId, "err", err) + return resp, err + } + for _, envContainer := range envContainers { + envContainer.Commits = []string{} + if envContainer.CiArtifactId > 0 { + if commits, ok := artifactWithGitCommit[envContainer.CiArtifactId]; ok && commits != nil { + envContainer.Commits = commits + } } } resp.Apps = envContainers return resp, err } +func getUniqueArtifacts(artifactIds []int) (uniqueArtifactIds []int) { + uniqueArtifactIds = make([]int, 0) + + uniqueArtifactMap := make(map[int]bool) + + for _, artifactId := range artifactIds { + if ok := uniqueArtifactMap[artifactId]; !ok { + uniqueArtifactIds = append(uniqueArtifactIds, artifactId) + uniqueArtifactMap[artifactId] = true + } + } + + return uniqueArtifactIds +} + func (impl AppListingServiceImpl) FetchAllDevtronManagedApps() ([]AppNameTypeIdContainer, error) { impl.Logger.Debug("reached at FetchAllDevtronManagedApps:") apps := make([]AppNameTypeIdContainer, 0) @@ -770,6 +806,49 @@ func (impl AppListingServiceImpl) FetchAppStageStatus(appId int, appType int) ([ return appStageStatuses, err } +func (impl AppListingServiceImpl) generateArtifactIDCommitMap(artifactIds []int) (ciArtifactAndGitCommitsMap map[int][]string, err error) { + + if len(artifactIds) == 0 { + impl.Logger.Errorw("error in getting the ArtifactIds", "ArtifactIds", artifactIds, "err", err) + return make(map[int][]string), err + } + + artifacts, err := impl.ciArtifactRepository.GetByIds(artifactIds) + if err != nil { + return make(map[int][]string), err + } + + ciArtifactAndGitCommitsMap = make(map[int][]string) + ciArtifactWithModificationMap := make(map[int][]repository.Modification) + + for _, artifact := range artifacts { + materialInfo, err := repository.GetCiMaterialInfo(artifact.MaterialInfo, artifact.DataSource) + if err != nil { + impl.Logger.Errorw("error in getting the MaterialInfo", "ArtifactId", artifact.Id, "err", err) + return make(map[int][]string), err + } + if len(materialInfo) == 0 { + continue + } + for _, material := range materialInfo { + ciArtifactWithModificationMap[artifact.Id] = append(ciArtifactWithModificationMap[artifact.Id], material.Modifications...) 
+ } + } + + for artifactId, modifications := range ciArtifactWithModificationMap { + + gitCommits := make([]string, 0) + + for _, modification := range modifications { + gitCommits = append(gitCommits, modification.Revision) + } + + ciArtifactAndGitCommitsMap[artifactId] = gitCommits + } + + return ciArtifactAndGitCommitsMap, nil +} + func (impl AppListingServiceImpl) FetchOtherEnvironment(ctx context.Context, appId int) ([]*bean.Environment, error) { newCtx, span := otel.Tracer("appListingRepository").Start(ctx, "FetchOtherEnvironment") envs, err := impl.appListingRepository.FetchOtherEnvironment(appId) @@ -793,6 +872,19 @@ func (impl AppListingServiceImpl) FetchOtherEnvironment(ctx context.Context, app impl.Logger.Errorw("error in fetching latest chart", "err", err) return envs, err } + + ciArtifacts := make([]int, 0) + for _, env := range envs { + ciArtifacts = append(ciArtifacts, env.CiArtifactId) + } + + uniqueArtifacts := getUniqueArtifacts(ciArtifacts) + + gitCommitsWithArtifacts, err := impl.generateArtifactIDCommitMap(uniqueArtifacts) + if err != nil { + impl.Logger.Errorw("Error in fetching the git commits of the ciArtifacts", "err", err, "ciArtifacts", ciArtifacts) + return envs, err + } for _, env := range envs { newCtx, span = otel.Tracer("envOverrideRepository").Start(newCtx, "FindLatestChartForAppByAppIdAndEnvId") envOverride, err := impl.envOverrideRepository.FindLatestChartForAppByAppIdAndEnvId(appId, env.EnvironmentId) @@ -809,6 +901,12 @@ func (impl AppListingServiceImpl) FetchOtherEnvironment(ctx context.Context, app if env.AppMetrics == nil { env.AppMetrics = &appLevelAppMetrics } + + if _, ok := gitCommitsWithArtifacts[env.CiArtifactId]; ok { + env.Commits = gitCommitsWithArtifacts[env.CiArtifactId] + } else { + env.Commits = make([]string, 0) + } env.InfraMetrics = &appLevelInfraMetrics //using default value, discarding value got from query } return envs, nil diff --git a/wire_gen.go b/wire_gen.go index 9b5f563127..14280cc724 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -576,7 +576,7 @@ func InitializeApp() (*App, error) { appWorkflowServiceImpl := appWorkflow2.NewAppWorkflowServiceImpl(sugaredLogger, appWorkflowRepositoryImpl, ciCdPipelineOrchestratorImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, enforcerUtilImpl, resourceGroupServiceImpl, appRepositoryImpl, userAuthServiceImpl, chartServiceImpl) appListingViewBuilderImpl := app2.NewAppListingViewBuilderImpl(sugaredLogger) linkoutsRepositoryImpl := repository2.NewLinkoutsRepositoryImpl(sugaredLogger, db) - appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl, userRepositoryImpl, deployedAppMetricsServiceImpl) + appListingServiceImpl := app2.NewAppListingServiceImpl(sugaredLogger, appListingRepositoryImpl, applicationServiceClientImpl, appRepositoryImpl, appListingViewBuilderImpl, pipelineRepositoryImpl, linkoutsRepositoryImpl, cdWorkflowRepositoryImpl, pipelineOverrideRepositoryImpl, environmentRepositoryImpl, argoUserServiceImpl, envConfigOverrideRepositoryImpl, chartRepositoryImpl, ciPipelineRepositoryImpl, dockerRegistryIpsConfigServiceImpl, userRepositoryImpl, deployedAppMetricsServiceImpl, ciArtifactRepositoryImpl) 
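// The overview changes above surface git commit hashes per environment by
// mapping each row's last deployed CiArtifact id to the revisions recorded in
// its material info. A minimal, self-contained sketch of that pattern follows
// (assumption: the type and function names below are illustrative only, not
// Devtron APIs; the real code resolves commits via CiArtifactRepository.GetByIds
// and GetCiMaterialInfo as shown in the hunks above).
type envOverviewRow struct {
	CiArtifactId int
	Commits      []string
}

func attachCommits(rows []*envOverviewRow, resolveCommits func(artifactIds []int) (map[int][]string, error)) error {
	// Deduplicate artifact ids so each artifact's material info is fetched only once.
	seen := make(map[int]bool)
	uniqueIds := make([]int, 0, len(rows))
	for _, row := range rows {
		if row.CiArtifactId > 0 && !seen[row.CiArtifactId] {
			seen[row.CiArtifactId] = true
			uniqueIds = append(uniqueIds, row.CiArtifactId)
		}
	}
	commitsByArtifact, err := resolveCommits(uniqueIds)
	if err != nil {
		return err
	}
	// Attach commits back onto every row, defaulting to an empty slice so the
	// JSON response always carries a "commits" array.
	for _, row := range rows {
		row.Commits = []string{}
		if commits, ok := commitsByArtifact[row.CiArtifactId]; ok && commits != nil {
			row.Commits = commits
		}
	}
	return nil
}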
appCloneServiceImpl := appClone.NewAppCloneServiceImpl(sugaredLogger, pipelineBuilderImpl, chartServiceImpl, configMapServiceImpl, appWorkflowServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, pipelineStageServiceImpl, ciTemplateServiceImpl, appRepositoryImpl, ciPipelineRepositoryImpl, pipelineRepositoryImpl, ciPipelineConfigServiceImpl, gitOpsConfigReadServiceImpl) deploymentTemplateRepositoryImpl := repository2.NewDeploymentTemplateRepositoryImpl(db, sugaredLogger) generateManifestDeploymentTemplateServiceImpl := generateManifest.NewDeploymentTemplateServiceImpl(sugaredLogger, chartServiceImpl, appListingServiceImpl, deploymentTemplateRepositoryImpl, helmAppServiceImpl, chartTemplateServiceImpl, helmAppClientImpl, k8sServiceImpl, propertiesConfigServiceImpl, deploymentTemplateHistoryServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, chartRefServiceImpl) From ac6802f70f0415cff30cccb440c1ef076869de1d Mon Sep 17 00:00:00 2001 From: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Date: Mon, 1 Apr 2024 19:17:20 +0530 Subject: [PATCH 23/29] feat:MailMaster Plugin v1.0 (#4825) * MailMaster Plugin v1.0 * Update 232_mailmaster.up.sql Updated Variable Names * Rename 232_mailmaster.down.sql to 233_mailmaster.down.sql * Rename 232_mailmaster.up.sql to 233_mailmaster.up.sql * 233_mailmaster.up.sql --------- Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> --- assets/MailMaster.png | Bin 0 -> 11663 bytes scripts/sql/233_mailmaster.down.sql | 5 ++ scripts/sql/233_mailmaster.up.sql | 96 ++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) create mode 100644 assets/MailMaster.png create mode 100644 scripts/sql/233_mailmaster.down.sql create mode 100644 scripts/sql/233_mailmaster.up.sql diff --git a/assets/MailMaster.png b/assets/MailMaster.png new file mode 100644 index 0000000000000000000000000000000000000000..db59565b8003f57bb754efd6967fa00b5a768afc GIT binary patch literal 11663 zcmch-byS<(x-SYX(o&#!i$igD3tlv6f#O!&Jy_7TIK?TnXo27k#oax)6WoFn3xxuu zCwyz|wb!@yIp>di@3A$gogQP;h{XvYIF;sECK}lgCKP z-4m`x6cn@_ySI8EJyjKeg^LrXnWc-l6{nArE0T|bBIW}?J~~-}%xHX^9GyJ?KH^M& zNdS<~4`ePT2`rj_e#G1@tpS>{^8XY?uEd#aK_FKE7nirUH>Worr;ED{7mtXD2p2am z7cVadQi8+7*BNBy!{O{fheY~2l&qD9g}a?A$j-%?<^kHw+{F_l&cyUDsehPcY4NXZ zTs_?#|JuaTg3HR$%E`(ZK6-quqli07o-t8*wHd z4ofR*GfzhllZ2|7oujIm2gu5uhJ%JzjO)QRN4tMh{)_T=Jg)!Eu78RBo0I@$+<%M0KePbSmLC6R_=6`}R&M`6LB`>M;cDjY zVfEJ6)k>U6)5^oe)7`@A-;@7OKpA%{Gmw>~1TQx)KL-mSejXta0pb7N|1akMPbM~Qa* zw}yJi@?V|$kY6MNnP0xYuWE5|f9vAnC?R9!?1i-NKgs_$BmZmi{D%y32K`$JbNy?E zNf3^m*dwQ!C`e8ZB;(@rFdtBS+yRyVS!o4n8Fz!XJV0pxFCV)-fR|rTM1-G@Ux=Td zhll&2nAt%{A~zo|HxI8>>2LxP=BcZqz6S~lZtuev^)nkTISR@n#2b~ja>r+vg@Zd@ zrAw$box~n}1%o@j<;y76ouUzAHPHRyq1~GC{m%Kr)%|l^=N^vWVM_0QV8X;>+peb$ z-79-%yT=!zkz<(l-Q(*&7I)9U<;#z3I`Lh5k4`TSP7sHum-K#vYw&X-w_fw?xi1}? 
z8wcm3t3ReUPugY=`j(F7w@7kWly|fuB?}6to zJvMps?7_pK-&eiMmh<|yKeuftB~CgQEIx7Q?wC8&Pn+R%e{8Qr^*N2gy>XpE% zmHwrpxvkS`=$=!-!sfw+f5i%+TQ7lYFP=-UWB!70_^4a)qG#z6sAOs9=;9T4@cYC* z`~=}%y!6beo5HLAZR)gb-n>Kpf<0uxrEpOpZlZPOplE2I3gG}T0XM(I%ZrkVWMFYUN-@MUjx%hol;Jmd}9);@oRyn#S9tBlPo@|~zcme9`T|63r9qW9Vj%`|puKh@A-56W_q5E~l zBy*P0e<-_WJHLNNC25k*Z*c4IqJ8$@Lk%pVe)VhT=FH{^Q^1f}_T22&>9^i(wxD6n zl&R9;-Mt?O#rO%;q)Cm>Q<3#+@y+Wg?He&o>kHebd?6!NIrCLxdkHP;646k(*m0Sd z@$kCUxaRfLj!ny)xzzSe>)iSJN%-lX);B093{Qcw(r90gjv7{mM@!uliIE?|JO( z$$8bVs{=1YZrHbQ9BX# zD*ETeeOur8$3(V!V6;EI+u`(pAu9K^sbzOjPW38Q)JT0Sov1u!X!+`QOB&C?Qp-`> zZv%NLVs}TryhXo!Ts#exSABwF`d)*kIh!F7R2g7!`%%i z;aURRHeEhH?|?M~&L>ghNL&$IF^Rz<*`2W4+q*wkzR)RX5#Jg2``H4;FHEY&=@NUv zbW3O`2_^B<-0)DZbpJV-+5F+i31z8L*%LdVQgmebqM*VPdu)uIAt^joQtYU6BKLgR zpQ1_>lC2vqS*wddDsH(_^|Ej;h2%b46mEUjq&0YyvnijECc2B}hnP{HRLIs-OnHuD zMV&KH>-Xc?{k4eqHeFn01J%wtGaNGyd2q z#&RrRd6o`y8X93Zkpb27 za7^E`utdQu_hdu|mTn_)#PvuMW$t7KnWR9rT|#(!H~_E&@xG4$elF_OBWU8C8Th81 zasisDnp`CLP4}@);bb#-%}IA^XSz#OB+zQF6$TD~Wjm`E>fLFX%W*)_loVD2qQINVfT2uEy!`MGoKBaH;~XcOj{T=*$DEWcB3b6@getScTZnr!ekNRU37}g_QT3nkfR66 zhkN+ytn_NYJ#}bingWhGHiFIwCh1=Yw1(ccl<#&qJZmaDILc`j0u#yl zYU~Jln&YLiaacTd&QCCe84zwmbJ zD}*vtSZ?r0KyGX#Gxk1JO82nj3v7AZl^v2d^|M8?>wno~fP^+$_N%)eT4_35r}p7? z=nrd?8mn0C(wor-MQ3Tj`Z2EQUSNGqiN(}cce@@+GFpy{Yi_y(uSzR$XTjHuulY3hEMtG4;F`M%)Al z0)ffs19lmg0}*@zt3j1peSZQa)(=KF7@!}_QEl%2L)^DQsPco)J&?9{U_s!3IK?dXr2~1`0bBgbGr&C^C2` z8&gHJ`2N6`FvXK~6|wo{W;-$mrEYG)T4hY!khnDZez-PC$)(vN97O~A(!%dqs}eNxvwiPM+O#(A|f2480m zFo_?adSkiQ5Rb^cY+XBbI>Y)3q;YoWQ}|09=h*+-ezFvv)RnA5{c&)8nMWF|@4}d% z{xpB(kF4mPyRkK)o3tx!YB4KVsq*S7=Yn-mtSjl}GV(a=fl*(i%MP~6ao<|1Af1u(wJ9?nr1(ac#3=5fg#KQ>vLz)w{!UA*L+s#dM7ylxdn)U%)- zT2X;{G}b;P2?>n+DrC7{d-fx69VC*dSHUIyawdYTEcqshjX|Wa9m| z9Fp3f?f0#sTUIfp>=p@39}!PE74G;YA>Q2cwsX7<*Y|&B$;H5hA&hQQT5sc_YCMb1 zK_FHyonJ*({MKp8H(S<*&NzV6Qtvcz_d|RS;JCmFs7cW*c^OP9;d=PW1IQ%^^K$v1 z0mmNkX*K7BGm*KpQqWkekH1J)zj515G;wLaJo5e;U}An>RBpAK80d^P+HlRImIBg6 z!*IG3OhrI7s@6t{)o4{@HpX$%^SeZPVq-#3)uR>|q5b*XaOcrJ7@arTjXz#BaWSc} z9B|w{qQ;7a=J$QX%Tt#>RH6un(>Z+Cbt)c$*vefa!spf>g9L;)=L&A^}e-bodF2Jthal)?;< zGq10JO1h`Z9s;~(jBd2=81kmJypSOleV$F#-Tq5%2BL#WuvJtxDPYqdd^ee+Bk3mi z+Wrfx|td9bD4$;3zUA>l5Um`dQA$`p`u7AZkWQ$NBxsKagy zN0N|RwqwG6&-F2BnBX=qS)&0h72`GCR}SJ8%{%C);_7L-+??Um#-LUbqx;g=;5aVA zfiw1{sxG%c8~yIG`ORe+{zWq%^OpGG9xqW0|C~)(7G0pn^v+$4FA-uy?l5MdeQQcl zLOFp+G2;?g^aP6`K-s*dRE9@eNBPvVedk%ZS1qPt+V`Eq9APu*Xjj3!749s2;~6Jn ze`UG(%jTH1%BM+Zl9+{(jfrpAJqg(dGtEmouHCh^@*E+xtb_T}g2{R0x>4Orq4gk+ zmtVzAe2lrA!+u-imBq8`25k3nW6v^BgA?kmP$8x1gZ ze#AUy%&I23pVZ%c&+RXoiT(Un(e58_&}N4f4W#zfgut?7NZvAO?hn%RT4L6_%vn6C z(J-A<08=$gW+`SSkX?8k3VVKU+QrrObo{ltuf`u^S%uq}3UmFNo zN$d?<_lM}tGo231H15YtnPW=mS<@P~lgEP2XgJ-s-UI1%{u!Id;lj2~Q${Ghp%N_} zdU{RK4(iXsjC0m+Hnpp)0y?pIRoha(#wYz=|F+JS`3=sl<9k?<+@K{KkuSvZ_*-)7 z2kfzv-e{@ah@gXTqu5tP&#Q7TJSLsM$4O7swRWr)RJO14g?!s@%?nXtu9@cl7_{1@ zxy->lNiO#d+^7nrn5XTt%mO+m_K&a6w_XDdeLt1P>xlexYs;Qqedd{4*3!4({stP| zc)3ScK8NaUGFN0aDhV$;B%Ro7ccRgqvvbbWql=SSP;qW-fLEX3*C zT2t+k-{%YKD%_SaB*=8$*rR0_N=(8mn1AM5@D0fta}5rl#yV{|$mocc^Jtw9mNe#b zUHYpsl44QKu0Nk-f)8eyl`$5(JXgx@&r}<=e58!Z(G90{0@OOQp&K83JqBXCg-Rl zJs7KmhfVMG_8dgm55M)0W!}86oWd8CzrCMIcV+qh2{G#qVrz-L;>c!*zO+LP6Nn4? 
zl2Rqx5L>0vMyMqPI9fjWT)ZXcWVZSndMs|VyWT!;3(gA5pus7K()v;v<*!2cwj(0Ji@tS#Z6Z)BG`!E%)h zbYGOF^ym4D=4n3gl8+oX6N=)uEuJlz&&WD zdHU}>R625(P^vx+k!|0!0k5YL%rQlVqo>6VqAN8UDq-1dbBv~nnR(Zj1DELGl8^{H z+DxnD#)BH7bqBwYD~p6Rt9c0U&&2q5afLd5T*5XiNBZsf*k1R_w|A@c9@TZaRk`YR zGHEkfH|~BC!J9QW<2^mu4CjWV4q!gk^Sk>+^u^XCJ`tVY(%18FmU1VBs|*a&$4xCz zLA<`{6-VyGC519wpecCz^3z9uPHt)LJp#kHdEe-Z&w|EkKEk2pU4@uG9sDv8Y{~6H z6h}4gXyWTriZ*hE(pw9aKld?0hE3nzdSiT>*k!7T{qu1cmo4b2J150qqCi_-P@w%` zRK*v&ld}|Dun5DCx#!>PnblU>BQ8&~s?&?j1v)N1;?REeXF6d~@@M^D`kiJNU9-B0;o`Po*WV zrotATzoqqCKBp4#_ttx$agdcmTZGAFLTqw@zh2HqtX8Z2k){I?$wP{7Tk}L-*KF|@ zawe&Si_?C5?V<7oH5L1dZ^8{Zb;o|m#OpbJZDuJT^#8mkkVPlUsu=$HDzl}O?~nL5 zb@x2YVciTYPq2S+4Q^>`l!<`a$=Cumu6oYEra)DXD6#dJx8KVd77pDxv$?&?C9+_q-Aj@xm1$t`CJ*7{yeLbhG`SwH{uCB(LO`TVx|lB}_EFS!jypj=R? zMGCK4>mIL{!CM>*^~f{${rVhILkx=7`!$?M%rjZKRuonOZ}ZJWcX(1-%GL}5yb9B= zm$4RVS!aJ0n&D09I22>bN)OYK8!u#&rl2-&Zn%KJNDSQg<_3gHR_!@kjlaT8i@!O8 zG4CPm9%Lm*3^WVKyL2KM4h54$;eN$1-|i zd9eNV2ws|OT>WHj3+Ib)!55Gh;mfCQb#)Q5CXJ21CZ&G8_$sueE+O-kx$N1BSrw5+ z-Lz;=P2nxu$X;tD$sXfn!rI!&G&&gWi5my2Q2FM`!VbpK8&JMXg)Ufp8xdCh8jk1+ zjQz!?D|R9>)9zBVMYmkcb|3W?tor;~Z^slsLZPXr3rhD`((GD*QEj&FjW;g-a;s#H z-srwDFSUBNR&({CbCrjHeYUu&VjpLqWt+J^)3-|uHC1&UbJ)9C9Byf)CCUiKxvwkA zUQI{ct^3(5O?qf*x}B9^#2c{jLujJ(j%uV#1a5W}zLHK$LlchP#mF3#eT-}qat8c;~ zj+_>&7|$nI52HBSq-giN1HU=15B|zq2La>ek<+wVC*M6}d%OfJ3h!m*38m2YOMWkbsgL+MGL*OZkXg0Tl!* zkDJt9Dl}fN^pY#Ttk$m(2x+|usQZ`+#$7lr%#_y~44_83I=rv4@_0cs@-W z$N!k5ZCDj!<}P1|64I$xdhS=20u|=u-od=jy~Fyr)-C==&zY;a15u#ICLE;tN2N}D z(Qq01wDY>;Gx4y(piXzd2&Vxg8)R9SSlV8}e0X|ZJlp=V!_lWX%+P(HGgtSQ_RBi; zlQEmb7dhU3cdP}EbzS`1@`d_svTov2mgI~#R?4)mlulSexH1=ASj4u6O`!Z{(3Y*f z_G^pi)Ri~Ic5lS9ABDTF@Ub&O_^u^JU@Fv0Vl(#B&nd4>jKnrzlw<=JbH?l3&L*Aa zG646&;bRrPl9WG|Kkv%daDlf}L2Vv)L0Z<|8v%wFQtRg8QNwRTkqP6$5{vVC2aMzl zkb=6roi=<)-Xs%jQ7#jem~av5nwwlQ;?P4!S*$Bjq&lS$`xbnh&%P)wJEcEv)a2`n zkDjq#tSL;!2rqN5n!#SNeUw7F?rklf38J3^*vk^QAr({)dpDH;?v;AWwMk(Ys8#k2 z9a9c>-Hen&d>mZLxX_J{r2=wJl{rbV$*T65L<$XI^(m{YMg{TE8UXTs(7m8#qJvww zYFTxi1TE>@SMbyx6;Y6_JZ`SFq{$@ysm1n7fZ$ZwaF%R^NIpLHZn89c?=_lO8(e>O zl!j7&HTlH0`*CZ+BoUZ91k@jhT{5j6!Ls*dUt2wX^5xVb_vzqLkR^o(^%JUDO&hy| zRf>nv-l2B3bw_{7cf$9Hff}w@R%fvi>uj+;`YS^vhU}nf)wLIV7e5nYhl(Ts_<87$qB#M5&h z6-LU)x}Py~lN@4`4*Wm`^YS)cx?Yi2U>pIVdB>@Qb&u?Q3K;jKLy`VnvU1jod}8^egnMrBT_hX9AxdGSm-&^yO`5`Z6+*k6UnRmwKY z)8Ie1LT19ihQG}LHdt5prs%Id?C4(HB&Zr2YsezVGH%~aDq8i@Jh)`lPkc*~M1M9fp<8HH%;Y~X9Mv*9_BPP*PXtr%r zSxe+@>F`5Yu}2$1Xl3#v8iQ5J@n0%WrFQ;EAlZpR3`RU-b`clblLSJPF$vCWy+WW$ z+8Vkq7Xk>%DwsDP0rX&4KokDzbM0hc_=&%8MkThW zlE>Xi1zrGj1K`zAnD$kIl4;5h;aRML5-#hHWH7Aa6SHj$!dAdRk|PWEgwDGR9pWO_^CHB%|fUlx$(^Mep6o>Pd=@te#^|Y@hgDE85sb&1vTy z+8FHEw0A1e)2ZixP*kYV&PjZ(pxngr6u<3J)81g@=A&2QG_2u1%)d}7AFm!n73giY zQ}uekGbj_VhtLs|@(es{-MV@&po)xgfTUWRie;w8N95@HDt zx-j2XROn7McVK93argSvbEeNDcd1xwQLaHtiC-A`BWeTo`J)8|@*l{q+80WPm?v;W zIr%dR&_Y1$9C4p)<)*&oDMs%n!?0XwfM&vEbm24HWLi(T1Yd@==pvHXIx@GBH}r}M zy>_eqONu(Z2J8FC1`@ZHNGErQc?3V{);ex6$-Zzn<;mt;U*?dca$GoPi2H3^hwcw$ z${|mFCY$L@1J{D=p!KMisM6Q=#JIj4QxG6r$eH(I4uCsXe7+~B{bMG5$l$FazG4o? zrdJ=_z%4~lbm59t-F3;2cUxTfDCblf+!5)k``>eQ|@CT*1u zn-1yeuZ}%z5zNM}4%|)4#}?(*LGTD+jzLc&?b%h9=MW!W?m2t@2hxJLP3Z4dcdDxP zD&K0IBw%=--G>}kiU&yGFyX#t0!-+Jr0=SylGKtXlj$G?7%0P=$WAjto%YoNx!`At+VW-dSApqhEk^ z&Ylsuu~S5^A5_U3i()pA?kp23$TVk95C59_o+5*(F6q$tP#59B;-^O6! 
z(rIr%OTBu_uVOi!wQ#1?aj>=%iX++e2XA^FII*vW;th6KTmKo5OgqUpm zR8e1mvt&_Dn;1(pZaSEV+l<6dkeI|0n6e~_*E`(*k`TMiL>9nR{*%%vFjX%hHskGR z`+EcUVqJC?S{xowA!?62aK2UZ(@~|1E;$xlG0?nBwsN&vXZe=^_Ef^=FCM}+aCp6* z!;=SFmi4D(bj!3Q;k6seA{YdFwq>4Dy}*-rm0aBuiJ(k5e9ru`hQxX$e> zTPdM}aTsDK7Y1rX)X~o4r(x04z4N)XjmLe_B($@s;lV@Azq~%o9G|5jm8<&|Gi$wC z=kmcRb%Me`WAgV6uZMw>Pf6Nv>Ez`mjDcm^Jn%thdZwgnqK}Dsr3O_(Q1>gLMJgVt=IRbH(FRrPFg#dC z55{@ayT>z$hb$CXf;NoDn=phMnWhxLe=lPR3>5yh-$YB?w7?lBM#yO%9b@bp_ zEa5p;y?!}dI2{mCLN1L2f6%&(rE{L4l`7N5cqlE-NR!WxBI^ol9~`BhEVaLmt;1lV zY)=W6tpvEf@6dC2H-))-n`!x^3)LpPnWIb^xaJIRO$_dQ99!4ynDa3~!kRtOL?$d- z&w*wt;Wu5RiRU9+50QR@kMXO7Q1!PyIWJLBC??dz4WGir8Dxd<@XO}EAMN@POLM`9YllZ zB@<*T%^t1le&i&Sg!Ei}ir*Ko>}0YndKLV9YM?9C9h;TDCHz^hu2&7(Lo?4IaVR5n zS4d6~DuBdn#avxF*Tgqvvutq5Fu6+I#I~_uw}*D+$*H53)ih6CcC7 zr&r1hca0=Jl;^Gb8m#;W!G?H!<4@2gkL_Ak*R1pf=}PsBND~}+Yg{r1bVbQbrnYH1 znmrEFS0ibks0?Z5Vt=Dr2r=GIkEfFrdaOub>@k_hl=FNvskR-Gvq`N;I(&>ccd^83*` z*vU|uMwg56kl69NmJ1^TWHTvA{SdmR7Nl^T+g~Z-i!`A_hiEEx?`BqCP%g9QDT}-c zE1?z%>{+W*@~}mA+deA(Yl`lW6Ee@PbQKdN!B13C5`CnZH^%A29MvQ{R+e4N$l4Oz z*ywQ*e5X3~1RuSBzqJRlwzl|+9^FLL9N-f>nm{zzaWyEnZpf?1OEZ~i(v-wb5HxC( z(72YaB_$XsYOq9)FSL`Sy%__&S{(Q48Du}(^~c>W4$5FBG@VK54$KJP8xnNuL)>a) zAM@9!iMiD$P5->m9h5qg33&d(^!LoHCB$bV0Tb;t|8o;xN_|#NX5ka|$%z#U(@(z> wu}R!0ZU@f2fd}$5|9D;K|MwsIXZO?|XSvs(io_rf?@V!Z literal 0 HcmV?d00001 diff --git a/scripts/sql/233_mailmaster.down.sql b/scripts/sql/233_mailmaster.down.sql new file mode 100644 index 0000000000..8bbb7268f3 --- /dev/null +++ b/scripts/sql/233_mailmaster.down.sql @@ -0,0 +1,5 @@ +DELETE FROM plugin_step_variable WHERE plugin_step_id =(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false); +DELETE FROM plugin_step WHERE plugin_id=(SELECT id FROM plugin_metadata WHERE name='Mail Master v1.0.0'); +DELETE FROM pipeline_stage_step_variable where pipeline_stage_step_id in (select id from pipeline_stage_step where name ='Mail Master v1.0.0'); +DELETE from pipeline_stage_step where name ='Mail Master v1.0.0'; +DELETE FROM plugin_metadata WHERE name ='Mail Master v1.0.0'; diff --git a/scripts/sql/233_mailmaster.up.sql b/scripts/sql/233_mailmaster.up.sql new file mode 100644 index 0000000000..cbe5ea9525 --- /dev/null +++ b/scripts/sql/233_mailmaster.up.sql @@ -0,0 +1,96 @@ +INSERT INTO plugin_metadata (id,name,description,type,icon,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_metadata'),'Mail Master v1.0.0','The Plugin is designed for sending bulk emails directly through your preferred SMTP server.','PRESET','https://raw.githubusercontent.com/devtron-labs/devtron/main/assets/MailMaster.png',false,'now()',1,'now()',1); + +INSERT INTO plugin_stage_mapping (id,plugin_id,stage_type,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_stage_mapping'),(SELECT id from plugin_metadata where name='Mail Master v1.0.0'), 0,'now()',1,'now()',1); + +INSERT INTO "plugin_pipeline_script" ("id", "script","type","deleted","created_on", "created_by", "updated_on", "updated_by") +VALUES ( + nextval('id_seq_plugin_pipeline_script'), + $$#!/bin/sh +set -eo pipefail + +#!/bin/bash + +HoldTime=${BatchDelayTime:-1} +BatchSize=${BatchSize:-10} +SmtpPort=${SmtpPort:-587} +EMAIL_FILE_EXT="${EmailContentFile##*.}" +DIR=$(pwd) + +CONFIG_FILE_ARGS="" +if [ -n "$RecipientConfigFile" ]; then + CONFIG_FILE_ARGS="-e RecipientConfigFile=/app/config.json -v $DIR/$RecipientConfigFile:/app/config.json" +fi + +docker run \ + -e 
SmtpServer="$SmtpServer" \ + -e SmtpPort="$SmtpPort" \ + -e SmtpUsername="$SmtpUsername" \ + -e SmtpPassword="$SmtpPassword" \ + -e SenderEmail="$SenderEmail" \ + -e Subject="$EmailSubject" \ + -e EmailContentFile="/app/email_content.$EMAIL_FILE_EXT" \ + -e SenderName="$SenderName" \ + -e RecipientsGroupName="$RecipientsGroupName" \ + -e RecipientsSubGroupName="$RecipientsSubGroupName" \ + -e Recipients="$Recipients" \ + -e BatchSize="$BatchSize" \ + -e HoldTime="$BatchDelayTime" \ + -v "$DIR/$EmailContentFile:/app/email_content.$EMAIL_FILE_EXT" \ + $CONFIG_FILE_ARGS \ + irawal007/mailmaster:v1.0 + + $$, + 'SHELL', + 'f', + 'now()', + 1, + 'now()', + 1 +); + +INSERT INTO "plugin_step" ("id", "plugin_id","name","description","index","step_type","script_id","deleted", "created_on", "created_by", "updated_on", "updated_by") +VALUES (nextval('id_seq_plugin_step'), (SELECT id FROM plugin_metadata WHERE name='Mail Master v1.0.0'),'Step 1','Step 1 - Mail Master v1.0.0','1','INLINE',(SELECT last_value FROM id_seq_plugin_pipeline_script),'f','now()', 1, 'now()', 1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'SmtpServer','STRING','The Hostname of SMTP Server','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'SmtpUsername','STRING','The Username for the SMTP connection.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'SmtpPassword','STRING','The Password for the SMTP connection.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'SenderEmail','STRING','Sender Email 
Address.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'EmailContentFile','STRING','Enter the path to file whose contents is to be send in Email.','t','f',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'SmtpPort','NUMBER','Port Number to use for SMTP connection.Defaults to 587 if not set','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'RecipientConfigFile','STRING','Enter the path to config.json file which contains the list of Recipients','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'EmailSubject','STRING','Subject of the email. Can either be specified here or in the first line of EmailContentFile after "Subejct:". ','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'BatchSize','NUMBER','Number of Emails to be sent per Batch. 
Defaults to 10 if not set','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'BatchDelayTime','NUMBER','Time to wait (in seconds) before scheduling the next batch','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'SenderName','STRING','Name of Email Sender','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'RecipientsGroupName','STRING','The Group id for selecting recipients in the config file.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'RecipientsSubGroupName','STRING','The SubGroup id for selecting recipients in the JSON configuration file.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); + +INSERT INTO plugin_step_variable (id,plugin_step_id,name,format,description,is_exposed,allow_empty_value,default_value,value,variable_type,value_type,previous_step_index,variable_step_index,variable_step_index_in_plugin,reference_variable_name,deleted,created_on,created_by,updated_on,updated_by) +VALUES (nextval('id_seq_plugin_step_variable'),(SELECT ps.id FROM plugin_metadata p inner JOIN plugin_step ps on ps.plugin_id=p.id WHERE p.name='Mail Master v1.0.0' and ps."index"=1 and ps.deleted=false),'Recipients','STRING','The emails of recipients separated by "," if user has not provided config file.','t','t',null,null,'INPUT','NEW',null,1,null,null,'f','now()',1,'now()',1); From 907142d372d1839d6139f0be3b39667cd06d57b9 Mon Sep 17 00:00:00 2001 From: Prakash Date: Tue, 2 Apr 2024 19:35:54 
+0530 Subject: [PATCH 24/29] return nil,err on git material fetch error (#4857) --- pkg/pipeline/CiHandler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index 0222e888e1..bf9c9e3bdb 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -430,7 +430,7 @@ func (impl *CiHandlerImpl) FetchMaterialsByPipelineId(pipelineId int, showAll bo impl.Logger.Debugw("commits for material ", "m", m, "commits: ", changesResp) if apiErr != nil { impl.Logger.Warnw("git sensor FetchChanges failed for material", "id", m.Id) - return []pipelineConfig.CiPipelineMaterialResponse{}, apiErr + return nil, apiErr } ciMaterialHistoryMap[m] = changesResp } @@ -459,7 +459,7 @@ func (impl *CiHandlerImpl) FetchMaterialsByPipelineId(pipelineId int, showAll bo regexMaterials, err := impl.ciPipelineMaterialRepository.GetRegexByPipelineId(pipelineId) if err != nil { impl.Logger.Errorw("regex ciMaterials fetch failed", "err", err) - return []pipelineConfig.CiPipelineMaterialResponse{}, err + return nil, err } for _, k := range regexMaterials { r := pipelineConfig.CiPipelineMaterialResponse{ From 9cb524d1c325719242b4cc88744a5e4303a9a809 Mon Sep 17 00:00:00 2001 From: Prakash Date: Wed, 3 Apr 2024 19:17:57 +0530 Subject: [PATCH 25/29] removed code for gitops repo migration in devtron apps (#4838) Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> --- pkg/app/ManifestPushService.go | 2 +- pkg/app/bean/ManifestPushTemplate.go | 37 +++++++++---------- .../trigger/devtronApps/TriggerService.go | 24 ------------ 3 files changed, 19 insertions(+), 44 deletions(-) diff --git a/pkg/app/ManifestPushService.go b/pkg/app/ManifestPushService.go index 252596551a..5ec6e1da37 100644 --- a/pkg/app/ManifestPushService.go +++ b/pkg/app/ManifestPushService.go @@ -106,7 +106,7 @@ func (impl *GitOpsManifestPushServiceImpl) PushChart(manifestPushTemplate *bean. return manifestPushResponse } // 3. 
Create Git Repo if required - if gitOps.IsGitOpsRepoNotConfigured(manifestPushTemplate.RepoUrl) || manifestPushTemplate.GitOpsRepoMigrationRequired { + if gitOps.IsGitOpsRepoNotConfigured(manifestPushTemplate.RepoUrl) { overRiddenGitRepoUrl, errMsg := impl.migrateRepoForGitOperation(*manifestPushTemplate, ctx) if errMsg != nil { manifestPushResponse.Error = errMsg diff --git a/pkg/app/bean/ManifestPushTemplate.go b/pkg/app/bean/ManifestPushTemplate.go index 64188cbe25..7a9f28ac90 100644 --- a/pkg/app/bean/ManifestPushTemplate.go +++ b/pkg/app/bean/ManifestPushTemplate.go @@ -6,25 +6,24 @@ const WORKFLOW_EXIST_ERROR = "workflow with this name already exist in this app" const Workflows = "workflows" type ManifestPushTemplate struct { - WorkflowRunnerId int - AppId int - ChartRefId int - EnvironmentId int - EnvironmentName string - UserId int32 - PipelineOverrideId int - AppName string - TargetEnvironmentName int - ChartReferenceTemplate string - ChartName string - ChartVersion string - ChartLocation string - RepoUrl string - IsCustomGitRepository bool - GitOpsRepoMigrationRequired bool - BuiltChartPath string - BuiltChartBytes *[]byte - MergedValues string + WorkflowRunnerId int + AppId int + ChartRefId int + EnvironmentId int + EnvironmentName string + UserId int32 + PipelineOverrideId int + AppName string + TargetEnvironmentName int + ChartReferenceTemplate string + ChartName string + ChartVersion string + ChartLocation string + RepoUrl string + IsCustomGitRepository bool + BuiltChartPath string + BuiltChartBytes *[]byte + MergedValues string } type ManifestPushResponse struct { diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index 81b21d1852..82edf18e7d 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -839,34 +839,10 @@ func (impl *TriggerServiceImpl) buildManifestPushTemplate(overrideRequest *bean3 manifestPushTemplate.ChartLocation = valuesOverrideResponse.EnvOverride.Chart.ChartLocation manifestPushTemplate.RepoUrl = valuesOverrideResponse.EnvOverride.Chart.GitRepoUrl manifestPushTemplate.IsCustomGitRepository = valuesOverrideResponse.EnvOverride.Chart.IsCustomGitRepository - manifestPushTemplate.GitOpsRepoMigrationRequired = impl.checkIfRepoMigrationRequired(manifestPushTemplate) } return manifestPushTemplate, err } -// checkIfRepoMigrationRequired checks if gitOps repo name is changed -func (impl *TriggerServiceImpl) checkIfRepoMigrationRequired(manifestPushTemplate *bean4.ManifestPushTemplate) bool { - monoRepoMigrationRequired := false - if gitOps.IsGitOpsRepoNotConfigured(manifestPushTemplate.RepoUrl) || manifestPushTemplate.IsCustomGitRepository { - return false - } - var err error - gitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoNameFromUrl(manifestPushTemplate.RepoUrl) - if len(gitOpsRepoName) == 0 { - gitOpsRepoName, err = impl.getAcdAppGitOpsRepoName(manifestPushTemplate.AppName, manifestPushTemplate.EnvironmentName) - if err != nil || gitOpsRepoName == "" { - return false - } - } - //here will set new git repo name if required to migrate - newGitOpsRepoName := impl.gitOpsConfigReadService.GetGitOpsRepoName(manifestPushTemplate.AppName) - //checking weather git repo migration needed or not, if existing git repo and new independent git repo is not same than go ahead with migration - if newGitOpsRepoName != gitOpsRepoName { - monoRepoMigrationRequired = true - } - return monoRepoMigrationRequired -} - // 
getAcdAppGitOpsRepoName returns the GitOps repository name, configured for the argoCd app func (impl *TriggerServiceImpl) getAcdAppGitOpsRepoName(appName string, environmentName string) (string, error) { //this method should only call in case of argo-integration and gitops configured From 6b74445e94e79b102c42cc88efda5a9290808f08 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu <111440205+gireesh-devtron@users.noreply.github.com> Date: Wed, 3 Apr 2024 21:02:21 +0530 Subject: [PATCH 26/29] fix: update argo app repo url in patch (#4876) --- client/argocdServer/ArgoClientWrapperService.go | 11 ++++++++++- pkg/deployment/trigger/devtronApps/TriggerService.go | 5 ++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/client/argocdServer/ArgoClientWrapperService.go b/client/argocdServer/ArgoClientWrapperService.go index d12281c10d..a6d4cf0c61 100644 --- a/client/argocdServer/ArgoClientWrapperService.go +++ b/client/argocdServer/ArgoClientWrapperService.go @@ -22,7 +22,7 @@ import ( ) type ACDConfig struct { - ArgoCDAutoSyncEnabled bool `env:"ARGO_AUTO_SYNC_ENABLED" envDefault:"true"` //will gradually switch this flag to false in enterprise + ArgoCDAutoSyncEnabled bool `env:"ARGO_AUTO_SYNC_ENABLED" envDefault:"true"` // will gradually switch this flag to false in enterprise } func GetACDDeploymentConfig() (*ACDConfig, error) { @@ -54,6 +54,9 @@ type ArgoClientWrapperService interface { // PatchArgoCdApp performs a patch operation on an argoCd app PatchArgoCdApp(ctx context.Context, dto *bean.ArgoCdAppPatchReqDto) error + // IsArgoAppPatchRequired decides weather the v1alpha1.ApplicationSource requires to be updated + IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentChartPath string) bool + // GetGitOpsRepoName returns the GitOps repository name, configured for the argoCd app GetGitOpsRepoName(ctx context.Context, appName string) (gitOpsRepoName string, err error) } @@ -180,6 +183,12 @@ func (impl *ArgoClientWrapperServiceImpl) GetArgoAppByName(ctx context.Context, return argoApplication, nil } +func (impl *ArgoClientWrapperServiceImpl) IsArgoAppPatchRequired(argoAppSpec *v1alpha1.ApplicationSource, currentGitRepoUrl, currentChartPath string) bool { + return (len(currentGitRepoUrl) != 0 && argoAppSpec.RepoURL != currentGitRepoUrl) || + argoAppSpec.Path != currentChartPath || + argoAppSpec.TargetRevision != bean.TargetRevisionMaster +} + func (impl *ArgoClientWrapperServiceImpl) PatchArgoCdApp(ctx context.Context, dto *bean.ArgoCdAppPatchReqDto) error { patchReq := adapter.GetArgoCdPatchReqFromDto(dto) reqbyte, err := json.Marshal(patchReq) diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index 82edf18e7d..ea99e5dd62 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -1084,7 +1084,7 @@ func (impl *TriggerServiceImpl) updateArgoPipeline(pipeline *pipelineConfig.Pipe appStatus, _ := status2.FromError(err) if appStatus.Code() == codes.OK { impl.logger.Debugw("argo app exists", "app", argoAppName, "pipeline", pipeline.Name) - if argoApplication.Spec.Source.Path != envOverride.Chart.ChartLocation || argoApplication.Spec.Source.TargetRevision != "master" { + if impl.argoClientWrapperService.IsArgoAppPatchRequired(argoApplication.Spec.Source, envOverride.Chart.GitRepoUrl, envOverride.Chart.ChartLocation) { patchRequestDto := &bean7.ArgoCdAppPatchReqDto{ ArgoAppName: argoAppName, ChartLocation: 
envOverride.Chart.ChartLocation, @@ -1097,6 +1097,9 @@ func (impl *TriggerServiceImpl) updateArgoPipeline(pipeline *pipelineConfig.Pipe impl.logger.Errorw("error in patching argo pipeline", "err", err, "req", patchRequestDto) return false, err } + if envOverride.Chart.GitRepoUrl != argoApplication.Spec.Source.RepoURL { + impl.logger.Infow("patching argo application's repo url", "argoAppName", argoAppName) + } impl.logger.Debugw("pipeline update req", "res", patchRequestDto) } else { impl.logger.Debug("pipeline no need to update ") From 0b45f25637168e293cf94f0d0e73294d15bf6c9a Mon Sep 17 00:00:00 2001 From: Gireesh Naidu <111440205+gireesh-devtron@users.noreply.github.com> Date: Wed, 3 Apr 2024 23:19:53 +0530 Subject: [PATCH 27/29] fix: injected app-serveice dependency into DeployedApplicationEventProcessor service (#4875) --- .../in/DeployedApplicationEventProcessorService.go | 11 +++++++---- wire_gen.go | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go b/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go index acdf437f53..b967262a7d 100644 --- a/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go +++ b/pkg/eventProcessor/in/DeployedApplicationEventProcessorService.go @@ -42,7 +42,9 @@ type DeployedApplicationEventProcessorImpl struct { installedAppRepository repository.InstalledAppRepository } -func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger, pubSubClient *pubsub.PubSubClientServiceImpl, +func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger, + pubSubClient *pubsub.PubSubClientServiceImpl, + appService app.AppService, gitOpsConfigReadService config.GitOpsConfigReadService, installedAppService FullMode.InstalledAppDBExtendedService, workflowDagExecutor dag.WorkflowDagExecutor, @@ -54,6 +56,7 @@ func NewDeployedApplicationEventProcessorImpl(logger *zap.SugaredLogger, pubSubC deployedApplicationEventProcessorImpl := &DeployedApplicationEventProcessorImpl{ logger: logger, pubSubClient: pubSubClient, + appService: appService, gitOpsConfigReadService: gitOpsConfigReadService, installedAppService: installedAppService, workflowDagExecutor: workflowDagExecutor, @@ -86,10 +89,10 @@ func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppUpdate() erro _, err = impl.pipelineRepository.GetArgoPipelineByArgoAppName(app.ObjectMeta.Name) if err != nil && err == pg.ErrNoRows { impl.logger.Infow("this app not found in pipeline table looking in installed_apps table", "appName", app.ObjectMeta.Name) - //if not found in pipeline table then search in installed_apps table + // if not found in pipeline table then search in installed_apps table installedAppModel, err := impl.installedAppRepository.GetInstalledAppByGitOpsAppName(app.ObjectMeta.Name) if err == pg.ErrNoRows { - //no installed_apps found + // no installed_apps found impl.logger.Errorw("no installed apps found", "err", err) return } @@ -98,7 +101,7 @@ func (impl *DeployedApplicationEventProcessorImpl) SubscribeArgoAppUpdate() erro return } if installedAppModel.Id > 0 { - //app found in installed_apps table hence setting flag to true + // app found in installed_apps table hence setting flag to true isAppStoreApplication = true } else { // app neither found in installed_apps nor in pipeline table hence returning diff --git a/wire_gen.go b/wire_gen.go index 14280cc724..0ab4d0301d 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -889,7 +889,7 @@ func InitializeApp() (*App, error) { } 
ciPipelineEventProcessorImpl := in.NewCIPipelineEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, gitWebhookServiceImpl) cdPipelineEventProcessorImpl := in.NewCDPipelineEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, cdWorkflowCommonServiceImpl, workflowStatusServiceImpl, triggerServiceImpl, argoUserServiceImpl, pipelineRepositoryImpl, installedAppRepositoryImpl) - deployedApplicationEventProcessorImpl := in.NewDeployedApplicationEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, gitOpsConfigReadServiceImpl, installedAppDBExtendedServiceImpl, workflowDagExecutorImpl, cdWorkflowCommonServiceImpl, pipelineBuilderImpl, appStoreDeploymentServiceImpl, pipelineRepositoryImpl, installedAppRepositoryImpl) + deployedApplicationEventProcessorImpl := in.NewDeployedApplicationEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, appServiceImpl, gitOpsConfigReadServiceImpl, installedAppDBExtendedServiceImpl, workflowDagExecutorImpl, cdWorkflowCommonServiceImpl, pipelineBuilderImpl, appStoreDeploymentServiceImpl, pipelineRepositoryImpl, installedAppRepositoryImpl) appStoreAppsEventProcessorImpl := in.NewAppStoreAppsEventProcessorImpl(sugaredLogger, pubSubClientServiceImpl, chartGroupServiceImpl, installedAppVersionHistoryRepositoryImpl) centralEventProcessor, err := eventProcessor.NewCentralEventProcessor(sugaredLogger, workflowEventProcessorImpl, ciPipelineEventProcessorImpl, cdPipelineEventProcessorImpl, deployedApplicationEventProcessorImpl, appStoreAppsEventProcessorImpl) if err != nil { From 606a33bc5e9b15d8b1c404f0dfbd1c70b21b4063 Mon Sep 17 00:00:00 2001 From: Shashwat Dadhich <92629050+ShashwatDadhich@users.noreply.github.com> Date: Thu, 4 Apr 2024 11:54:09 +0530 Subject: [PATCH 28/29] fix: dependabot version upgrade (#4792) * dependabot version upgrade * dependabot version upgrade * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * argo assets deletion reverted --- api/restHandler/ExternalCiRestHandler.go | 2 +- go.mod | 55 +- go.sum | 376 +- .../deployment/InstalledAppGitOpsService.go | 2 + .../Masterminds/semver/v3/.golangci.yml | 3 - .../github.com/Masterminds/semver/v3/Makefile | 17 +- .../Masterminds/semver/v3/README.md | 22 +- .../Masterminds/semver/v3/SECURITY.md | 19 + .../Masterminds/semver/v3/constraints.go | 2 +- .../github.com/Masterminds/semver/v3/fuzz.go | 22 - vendor/github.com/antonmedv/expr/.gitignore | 1 + vendor/github.com/antonmedv/expr/.travis.yml | 3 - vendor/github.com/antonmedv/expr/README.md | 49 +- vendor/github.com/antonmedv/expr/ast/node.go | 48 +- vendor/github.com/antonmedv/expr/ast/print.go | 6 +- .../github.com/antonmedv/expr/ast/visitor.go | 94 +- .../antonmedv/expr/builtin/builtin.go | 101 + .../antonmedv/expr/checker/checker.go | 807 +- .../antonmedv/expr/checker/types.go | 263 +- .../antonmedv/expr/compiler/compiler.go | 432 +- .../antonmedv/expr/compiler/patcher.go | 44 - .../github.com/antonmedv/expr/conf/config.go | 107 +- .../antonmedv/expr/conf/functions.go | 1 + .../conf/{operators_table.go => operators.go} | 35 +- .../antonmedv/expr/conf/types_table.go | 37 +- vendor/github.com/antonmedv/expr/expr.go | 142 +- .../github.com/antonmedv/expr/file/error.go | 11 + .../github.com/antonmedv/expr/file/source.go | 19 - .../antonmedv/expr/optimizer/const_expr.go | 82 +- .../antonmedv/expr/optimizer/const_range.go | 3 +- .../antonmedv/expr/optimizer/fold.go | 292 +- .../antonmedv/expr/optimizer/in_array.go | 5 +- .../antonmedv/expr/optimizer/in_range.go | 11 +-
.../antonmedv/expr/optimizer/optimizer.go | 4 +- .../antonmedv/expr/parser/lexer/lexer.go | 19 +- .../antonmedv/expr/parser/lexer/state.go | 84 +- .../antonmedv/expr/parser/parser.go | 210 +- .../github.com/antonmedv/expr/vm/generated.go | 262 + .../github.com/antonmedv/expr/vm/helpers.go | 3247 ------- .../github.com/antonmedv/expr/vm/opcodes.go | 39 +- .../github.com/antonmedv/expr/vm/program.go | 167 +- .../github.com/antonmedv/expr/vm/runtime.go | 370 - .../antonmedv/expr/vm/runtime/generated.go | 3288 +++++++ .../antonmedv/expr/vm/runtime/runtime.go | 517 ++ vendor/github.com/antonmedv/expr/vm/vm.go | 373 +- .../argoproj/argo-cd/v2/common/common.go | 66 +- .../argoproj/argo-cd/v2/common/version.go | 4 + .../apiclient/application/application.pb.go | 1382 ++- .../application/application.pb.gw.go | 141 + .../v2/pkg/apiclient/version/version.pb.go | 104 +- .../v1alpha1/applicationset_types.go | 183 +- .../apis/application/v1alpha1/generated.pb.go | 7898 +++++++++++++---- .../apis/application/v1alpha1/generated.proto | 217 +- .../application/v1alpha1/openapi_generated.go | 789 +- .../v2/pkg/apis/application/v1alpha1/types.go | 393 +- .../pkg/apis/application/v1alpha1/values.go | 61 + .../v1alpha1/zz_generated.deepcopy.go | 467 +- .../v2/reposerver/apiclient/clientset.go | 10 +- .../v2/reposerver/apiclient/repository.pb.go | 2341 ++++- .../argoproj/argo-cd/v2/util/cache/cache.go | 12 +- .../argoproj/argo-cd/v2/util/cache/redis.go | 35 +- .../argo-cd/v2/util/cache/redis_hook.go | 34 +- .../argoproj/argo-cd/v2/util/config/reader.go | 2 +- .../argoproj/argo-cd/v2/util/env/env.go | 26 +- .../argoproj/argo-cd/v2/util/git/client.go | 72 +- .../argoproj/argo-cd/v2/util/grpc/grpc.go | 3 +- .../argoproj/argo-cd/v2/util/grpc/trace.go | 33 + .../argoproj/argo-cd/v2/util/helm/client.go | 198 +- .../argoproj/argo-cd/v2/util/helm/cmd.go | 26 +- .../argoproj/argo-cd/v2/util/helm/helm.go | 7 +- .../argoproj/argo-cd/v2/util/helm/tags.go | 6 +- .../argoproj/argo-cd/v2/util/io/files/tar.go | 15 +- .../argoproj/argo-cd/v2/util/kube/kube.go | 60 +- .../argo-cd/v2/util/kube/portforwarder.go | 3 +- .../argoproj/argo-cd/v2/util/security/rbac.go | 4 +- .../argo-cd/v2/util/settings/settings.go | 154 +- .../argoproj/argo-cd/v2/util/text/text.go | 18 - .../bmatcuk/doublestar/v4/.codecov.yml | 10 + .../bmatcuk/doublestar/v4/.gitignore | 32 + .../github.com/bmatcuk/doublestar/v4/LICENSE | 22 + .../bmatcuk/doublestar/v4/README.md | 404 + .../bmatcuk/doublestar/v4/UPGRADING.md | 63 + .../bmatcuk/doublestar/v4/doublestar.go | 13 + .../github.com/bmatcuk/doublestar/v4/glob.go | 473 + .../bmatcuk/doublestar/v4/globoptions.go | 144 + .../bmatcuk/doublestar/v4/globwalk.go | 414 + .../github.com/bmatcuk/doublestar/v4/match.go | 376 + .../github.com/bmatcuk/doublestar/v4/utils.go | 147 + .../bmatcuk/doublestar/v4/validate.go | 82 + .../bradleyfalzon/ghinstallation/v2/README.md | 18 +- .../ghinstallation/v2/appsTransport.go | 41 +- .../bradleyfalzon/ghinstallation/v2/sign.go | 33 + .../ghinstallation/v2/transport.go | 35 +- .../cloudflare/circl/ecc/goldilocks/twist.go | 2 +- .../cloudflare/circl/internal/sha3/keccakf.go | 12 +- .../cloudflare/circl/internal/sha3/sha3.go | 11 +- .../cloudflare/circl/internal/sha3/shake.go | 40 + .../cloudflare/circl/math/primes.go | 34 + .../cloudflare/circl/sign/ed25519/ed25519.go | 2 +- .../authenticator/client/k8sClient.go | 4 + .../github.com/go-errors/errors/.travis.yml | 3 + vendor/github.com/go-errors/errors/README.md | 16 + vendor/github.com/go-errors/errors/cover.out | 89 - 
vendor/github.com/go-errors/errors/error.go | 32 +- .../github.com/go-errors/errors/error_1_13.go | 31 + .../go-errors/errors/error_backward.go | 57 + .../github.com/go-errors/errors/stackframe.go | 38 +- .../{redis/v8 => cache/v9}/.prettierrc.yml | 0 .../go-redis/cache/{v8 => v9}/CHANGELOG.md | 0 .../go-redis/cache/{v8 => v9}/LICENSE | 0 .../go-redis/cache/{v8 => v9}/Makefile | 0 .../go-redis/cache/{v8 => v9}/README.md | 25 +- .../go-redis/cache/{v8 => v9}/cache.go | 5 +- .../go-redis/cache/{v8 => v9}/local.go | 4 +- .../github.com/go-redis/redis/v8/CHANGELOG.md | 177 - vendor/github.com/go-redis/redis/v8/Makefile | 35 - .../github.com/go-redis/redis/v8/command.go | 3478 -------- .../redis/v8/internal/proto/reader.go | 332 - .../go-redis/redis/v8/internal/safe.go | 12 - .../go-redis/redis/v8/internal/unsafe.go | 21 - vendor/github.com/go-redis/redis/v8/redis.go | 773 -- .../golang/protobuf/jsonpb/decode.go | 1 + .../golang/protobuf/jsonpb/encode.go | 1 + .../protoc-gen-go/descriptor/descriptor.pb.go | 128 +- .../github.com/golang/protobuf/ptypes/any.go | 7 +- vendor/github.com/google/btree/.travis.yml | 1 - vendor/github.com/google/btree/README.md | 2 - vendor/github.com/google/btree/btree.go | 3 + .../github.com/google/btree/btree_generic.go | 1083 +++ .../google/gnostic/jsonschema/display.go | 17 +- .../google/gnostic/jsonschema/models.go | 8 +- .../google/gnostic/jsonschema/reader.go | 1 - .../google/gnostic/jsonschema/writer.go | 30 +- .../google/gnostic/openapiv2/OpenAPIv2.go | 7 +- .../google/gnostic/openapiv3/OpenAPIv3.go | 7 +- .../google/gnostic/openapiv3/OpenAPIv3.pb.go | 13 +- .../google/gnostic/openapiv3/OpenAPIv3.proto | 2 +- .../google/gnostic/openapiv3/README.md | 4 + .../gnostic/openapiv3/annotations.pb.go | 183 + .../gnostic/openapiv3/annotations.proto | 60 + .../go-github/v45/github/orgs_audit_log.go | 116 - .../go-github/v45/github/orgs_custom_roles.go | 46 - .../google/go-github/{v45 => v53}/AUTHORS | 78 +- .../google/go-github/{v45 => v53}/LICENSE | 0 .../go-github/{v45 => v53}/github/actions.go | 0 .../{v45 => v53}/github/actions_artifacts.go | 36 +- .../go-github/v53/github/actions_cache.go | 235 + .../go-github/v53/github/actions_oidc.go | 73 + .../v53/github/actions_required_workflows.go | 247 + .../github/actions_runner_groups.go | 31 +- .../{v45 => v53}/github/actions_runners.go | 0 .../{v45 => v53}/github/actions_secrets.go | 2 +- .../go-github/v53/github/actions_variables.go | 293 + .../github/actions_workflow_jobs.go | 4 + .../github/actions_workflow_runs.go | 75 +- .../{v45 => v53}/github/actions_workflows.go | 11 +- .../go-github/{v45 => v53}/github/activity.go | 17 +- .../{v45 => v53}/github/activity_events.go | 0 .../github/activity_notifications.go | 10 +- .../{v45 => v53}/github/activity_star.go | 0 .../{v45 => v53}/github/activity_watching.go | 0 .../go-github/{v45 => v53}/github/admin.go | 0 .../{v45 => v53}/github/admin_orgs.go | 0 .../{v45 => v53}/github/admin_stats.go | 0 .../{v45 => v53}/github/admin_users.go | 0 .../go-github/{v45 => v53}/github/apps.go | 35 +- .../{v45 => v53}/github/apps_hooks.go | 0 .../github/apps_hooks_deliveries.go | 0 .../{v45 => v53}/github/apps_installation.go | 0 .../{v45 => v53}/github/apps_manifest.go | 0 .../{v45 => v53}/github/apps_marketplace.go | 12 + .../{v45 => v53}/github/authorizations.go | 0 .../go-github/{v45 => v53}/github/billing.go | 28 +- .../go-github/{v45 => v53}/github/checks.go | 0 .../{v45 => v53}/github/code-scanning.go | 46 + .../{v45 => v53}/github/dependabot.go | 0 
.../go-github/v53/github/dependabot_alerts.go | 135 + .../{v45 => v53}/github/dependabot_secrets.go | 26 +- .../go-github/{v45 => v53}/github/doc.go | 15 +- .../{v45 => v53}/github/enterprise.go | 0 .../github/enterprise_actions_runners.go | 19 + .../github/enterprise_audit_log.go | 0 .../enterprise_code_security_and_analysis.go | 78 + .../go-github/{v45 => v53}/github/event.go | 11 +- .../{v45 => v53}/github/event_types.go | 101 +- .../go-github/{v45 => v53}/github/gists.go | 4 +- .../{v45 => v53}/github/gists_comments.go | 3 +- .../go-github/{v45 => v53}/github/git.go | 0 .../{v45 => v53}/github/git_blobs.go | 0 .../{v45 => v53}/github/git_commits.go | 5 +- .../go-github/{v45 => v53}/github/git_refs.go | 2 +- .../go-github/{v45 => v53}/github/git_tags.go | 0 .../{v45 => v53}/github/git_trees.go | 0 .../{v45 => v53}/github/github-accessors.go | 2904 +++++- .../go-github/{v45 => v53}/github/github.go | 258 +- .../{v45 => v53}/github/gitignore.go | 0 .../{v45 => v53}/github/interactions.go | 0 .../{v45 => v53}/github/interactions_orgs.go | 0 .../{v45 => v53}/github/interactions_repos.go | 0 .../{v45 => v53}/github/issue_import.go | 15 +- .../go-github/{v45 => v53}/github/issues.go | 30 +- .../{v45 => v53}/github/issues_assignees.go | 0 .../{v45 => v53}/github/issues_comments.go | 4 +- .../{v45 => v53}/github/issues_events.go | 3 +- .../{v45 => v53}/github/issues_labels.go | 0 .../{v45 => v53}/github/issues_milestones.go | 9 +- .../{v45 => v53}/github/issues_timeline.go | 9 +- .../go-github/{v45 => v53}/github/licenses.go | 0 .../go-github/{v45 => v53}/github/messages.go | 72 +- .../{v45 => v53}/github/migrations.go | 0 .../github/migrations_source_import.go | 0 .../{v45 => v53}/github/migrations_user.go | 6 +- .../go-github/{v45 => v53}/github/misc.go | 8 + .../go-github/{v45 => v53}/github/orgs.go | 38 +- .../github/orgs_actions_allowed.go | 0 .../github/orgs_actions_permissions.go | 0 .../go-github/v53/github/orgs_audit_log.go | 148 + .../go-github/v53/github/orgs_custom_roles.go | 120 + .../{v45 => v53}/github/orgs_hooks.go | 0 .../github/orgs_hooks_deliveries.go | 0 .../{v45 => v53}/github/orgs_members.go | 4 +- .../github/orgs_outside_collaborators.go | 0 .../{v45 => v53}/github/orgs_packages.go | 2 +- .../{v45 => v53}/github/orgs_projects.go | 0 .../v53/github/orgs_security_managers.go | 57 + .../github/orgs_users_blocking.go | 0 .../go-github/{v45 => v53}/github/packages.go | 0 .../go-github/{v45 => v53}/github/projects.go | 0 .../go-github/{v45 => v53}/github/pulls.go | 9 +- .../{v45 => v53}/github/pulls_comments.go | 4 +- .../{v45 => v53}/github/pulls_reviewers.go | 0 .../{v45 => v53}/github/pulls_reviews.go | 35 +- .../{v45 => v53}/github/pulls_threads.go | 0 .../{v45 => v53}/github/reactions.go | 0 .../go-github/{v45 => v53}/github/repos.go | 438 +- .../v53/github/repos_actions_access.go | 55 + .../github/repos_actions_allowed.go | 0 .../github/repos_actions_permissions.go | 0 .../{v45 => v53}/github/repos_autolinks.go | 12 +- .../go-github/v53/github/repos_codeowners.go | 46 + .../github/repos_collaborators.go | 7 + .../{v45 => v53}/github/repos_comments.go | 5 +- .../{v45 => v53}/github/repos_commits.go | 0 .../github/repos_community_health.go | 3 +- .../{v45 => v53}/github/repos_contents.go | 8 +- .../repos_deployment_branch_policies.go | 123 + .../{v45 => v53}/github/repos_deployments.go | 0 .../{v45 => v53}/github/repos_environments.go | 41 + .../{v45 => v53}/github/repos_forks.go | 13 +- .../{v45 => v53}/github/repos_hooks.go | 60 +- .../github/repos_hooks_deliveries.go | 0 
.../{v45 => v53}/github/repos_invitations.go | 0 .../{v45 => v53}/github/repos_keys.go | 0 .../google/go-github/v53/github/repos_lfs.go | 49 + .../{v45 => v53}/github/repos_merging.go | 0 .../{v45 => v53}/github/repos_pages.go | 74 +- .../github/repos_prereceive_hooks.go | 0 .../{v45 => v53}/github/repos_projects.go | 0 .../{v45 => v53}/github/repos_releases.go | 22 +- .../{v45 => v53}/github/repos_stats.go | 0 .../{v45 => v53}/github/repos_statuses.go | 5 +- .../google/go-github/v53/github/repos_tags.go | 76 + .../{v45 => v53}/github/repos_traffic.go | 0 .../go-github/{v45 => v53}/github/scim.go | 64 +- .../go-github/{v45 => v53}/github/search.go | 28 +- .../{v45 => v53}/github/secret_scanning.go | 8 + .../go-github/{v45 => v53}/github/strings.go | 0 .../go-github/{v45 => v53}/github/teams.go | 25 +- .../github/teams_discussion_comments.go | 0 .../{v45 => v53}/github/teams_discussions.go | 0 .../{v45 => v53}/github/teams_members.go | 0 .../{v45 => v53}/github/timestamp.go | 8 + .../go-github/{v45 => v53}/github/users.go | 4 +- .../github/users_administration.go | 0 .../{v45 => v53}/github/users_blocking.go | 0 .../{v45 => v53}/github/users_emails.go | 25 + .../{v45 => v53}/github/users_followers.go | 0 .../{v45 => v53}/github/users_gpg_keys.go | 5 +- .../{v45 => v53}/github/users_keys.go | 2 + .../{v45 => v53}/github/users_packages.go | 0 .../{v45 => v53}/github/users_projects.go | 0 .../v53/github/users_ssh_signing_keys.go | 108 + .../{v45 => v53}/github/with_appengine.go | 0 .../{v45 => v53}/github/without_appengine.go | 0 .../go-grpc-middleware/.travis.yml | 16 - .../go-grpc-middleware/CHANGELOG.md | 51 - .../go-grpc-middleware/README.md | 55 +- .../go-grpc-middleware/chain.go | 130 +- .../go-grpc-middleware/retry/doc.go | 2 +- .../go-grpc-middleware/retry/retry.go | 14 +- .../util/metautils/nicemd.go | 6 +- .../hashicorp/go-retryablehttp/CHANGELOG.md | 9 + .../hashicorp/go-retryablehttp/CODEOWNERS | 1 + .../hashicorp/go-retryablehttp/LICENSE | 2 + .../hashicorp/go-retryablehttp/client.go | 108 +- .../go-retryablehttp/roundtripper.go | 3 + .../github.com/imdario/mergo/CONTRIBUTING.md | 112 + vendor/github.com/imdario/mergo/README.md | 25 +- vendor/github.com/imdario/mergo/SECURITY.md | 14 + vendor/github.com/imdario/mergo/map.go | 6 +- vendor/github.com/imdario/mergo/merge.go | 59 +- vendor/github.com/imdario/mergo/mergo.go | 11 +- .../ktrysmt/go-bitbucket/bitbucket.go | 7 +- .../go-bitbucket/branchrestrictions.go | 2 +- .../github.com/ktrysmt/go-bitbucket/client.go | 28 +- .../ktrysmt/go-bitbucket/commits.go | 6 +- .../ktrysmt/go-bitbucket/downloads.go | 2 +- .../github.com/ktrysmt/go-bitbucket/issues.go | 2 +- .../ktrysmt/go-bitbucket/pipelines.go | 4 +- .../ktrysmt/go-bitbucket/pullrequests.go | 10 +- .../ktrysmt/go-bitbucket/repositories.go | 24 +- .../ktrysmt/go-bitbucket/repository.go | 45 +- .../ktrysmt/go-bitbucket/webhooks.go | 4 +- .../ktrysmt/go-bitbucket/workspaces.go | 10 +- .../opencontainers/image-spec/LICENSE | 191 + .../image-spec/specs-go/v1/annotations.go | 68 + .../image-spec/specs-go/v1/config.go | 111 + .../image-spec/specs-go/v1/descriptor.go | 72 + .../image-spec/specs-go/v1/index.go | 32 + .../image-spec/specs-go/v1/layout.go | 28 + .../image-spec/specs-go/v1/manifest.go | 49 + .../image-spec/specs-go/v1/mediatype.go | 75 + .../image-spec/specs-go/version.go | 32 + .../image-spec/specs-go/versioned.go | 23 + .../client_golang/api/prometheus/v1/api.go | 396 +- .../collectors/go_collector_latest.go | 2 + .../client_golang/prometheus/counter.go | 26 +- 
.../client_golang/prometheus/desc.go | 46 +- .../client_golang/prometheus/doc.go | 44 +- .../client_golang/prometheus/gauge.go | 26 +- .../prometheus/go_collector_latest.go | 7 +- .../client_golang/prometheus/histogram.go | 61 +- .../client_golang/prometheus/labels.go | 72 + .../client_golang/prometheus/metric.go | 6 +- .../client_golang/prometheus/promauto/auto.go | 28 +- .../client_golang/prometheus/promhttp/http.go | 19 +- .../prometheus/promhttp/instrument_client.go | 26 +- .../prometheus/promhttp/instrument_server.go | 101 +- .../prometheus/promhttp/option.go | 38 +- .../client_golang/prometheus/registry.go | 17 +- .../client_golang/prometheus/summary.go | 39 +- .../client_golang/prometheus/timer.go | 28 +- .../client_golang/prometheus/value.go | 10 +- .../client_golang/prometheus/vec.go | 79 +- .../client_golang/prometheus/vnext.go | 23 + .../client_golang/prometheus/wrap.go | 8 +- .../prometheus/common/expfmt/decode.go | 36 +- .../prometheus/common/expfmt/fuzz.go | 4 +- .../common/expfmt/openmetrics_create.go | 22 +- .../prometheus/common/expfmt/text_parse.go | 10 +- .../bitbucket.org/ww/goautoneg/autoneg.go | 22 +- .../prometheus/common/model/time.go | 89 +- .../prometheus/common/model/value.go | 246 +- .../prometheus/common/model/value_float.go | 100 + .../common/model/value_histogram.go | 178 + .../prometheus/common/model/value_type.go | 83 + .../redis/v8 => redis/go-redis/v9}/.gitignore | 2 +- .../v8 => redis/go-redis/v9}/.golangci.yml | 0 .../redis/go-redis/v9/.prettierrc.yml | 4 + .../github.com/redis/go-redis/v9/CHANGELOG.md | 124 + .../redis/v8 => redis/go-redis/v9}/LICENSE | 2 +- vendor/github.com/redis/go-redis/v9/Makefile | 41 + .../redis/v8 => redis/go-redis/v9}/README.md | 84 +- .../v8 => redis/go-redis/v9}/RELEASING.md | 0 .../redis/v8 => redis/go-redis/v9}/cluster.go | 537 +- .../go-redis/v9}/cluster_commands.go | 12 +- .../github.com/redis/go-redis/v9/command.go | 5168 +++++++++++ .../v8 => redis/go-redis/v9}/commands.go | 926 +- .../redis/v8 => redis/go-redis/v9}/doc.go | 0 .../redis/v8 => redis/go-redis/v9}/error.go | 17 +- .../v8 => redis/go-redis/v9}/internal/arg.go | 4 +- .../go-redis/v9}/internal/hashtag/hashtag.go | 2 +- .../go-redis/v9}/internal/hscan/hscan.go | 6 + .../go-redis/v9}/internal/hscan/structmap.go | 30 +- .../go-redis/v9}/internal/internal.go | 2 +- .../v8 => redis/go-redis/v9}/internal/log.go | 0 .../v8 => redis/go-redis/v9}/internal/once.go | 7 +- .../go-redis/v9}/internal/pool/conn.go | 18 +- .../go-redis/v9/internal/pool/conn_check.go | 50 + .../v9/internal/pool/conn_check_dummy.go | 10 + .../go-redis/v9}/internal/pool/pool.go | 205 +- .../go-redis/v9}/internal/pool/pool_single.go | 0 .../go-redis/v9}/internal/pool/pool_sticky.go | 0 .../go-redis/v9/internal/proto/reader.go | 552 ++ .../go-redis/v9}/internal/proto/scan.go | 7 +- .../go-redis/v9}/internal/proto/writer.go | 11 +- .../go-redis/v9}/internal/rand/rand.go | 0 .../v8 => redis/go-redis/v9}/internal/util.go | 2 +- .../go-redis/v9}/internal/util/safe.go | 0 .../go-redis/v9}/internal/util/strconv.go | 0 .../go-redis/v9}/internal/util/unsafe.go | 0 .../v8 => redis/go-redis/v9}/iterator.go | 13 +- .../redis/v8 => redis/go-redis/v9}/options.go | 256 +- .../v8 => redis/go-redis/v9}/package.json | 4 +- .../v8 => redis/go-redis/v9}/pipeline.go | 74 +- .../redis/v8 => redis/go-redis/v9}/pubsub.go | 95 +- vendor/github.com/redis/go-redis/v9/redis.go | 827 ++ .../redis/v8 => redis/go-redis/v9}/result.go | 22 +- .../redis/v8 => redis/go-redis/v9}/ring.go | 442 +- .../redis/v8 => 
redis/go-redis/v9}/script.go | 23 +- .../v8 => redis/go-redis/v9}/sentinel.go | 338 +- .../redis/v8 => redis/go-redis/v9}/tx.go | 56 +- .../v8 => redis/go-redis/v9}/universal.go | 108 +- .../redis/v8 => redis/go-redis/v9}/version.go | 2 +- .../vmihailenco/go-tinylfu/tinylfu.go | 19 +- vendor/github.com/xanzy/go-gitlab/.gitignore | 5 + .../github.com/xanzy/go-gitlab/.golangci.yml | 12 +- .../xanzy/go-gitlab/CONTRIBUTING.md | 52 + vendor/github.com/xanzy/go-gitlab/Makefile | 22 + vendor/github.com/xanzy/go-gitlab/README.md | 17 +- .../xanzy/go-gitlab/access_requests.go | 36 +- .../xanzy/go-gitlab/applications.go | 12 +- .../xanzy/go-gitlab/audit_events.go | 24 +- .../xanzy/go-gitlab/award_emojis.go | 72 +- vendor/github.com/xanzy/go-gitlab/boards.go | 94 +- vendor/github.com/xanzy/go-gitlab/branches.go | 41 +- .../xanzy/go-gitlab/broadcast_messages.go | 80 +- .../xanzy/go-gitlab/ci_yml_templates.go | 27 +- .../xanzy/go-gitlab/client_options.go | 40 +- .../xanzy/go-gitlab/cluster_agents.go | 294 + vendor/github.com/xanzy/go-gitlab/commits.go | 118 +- .../xanzy/go-gitlab/container_registry.go | 21 +- .../xanzy/go-gitlab/custom_attributes.go | 34 +- .../github.com/xanzy/go-gitlab/deploy_keys.go | 36 +- .../xanzy/go-gitlab/deploy_tokens.go | 40 +- .../github.com/xanzy/go-gitlab/deployments.go | 20 +- .../go-gitlab/deployments_merge_requests.go | 53 + .../github.com/xanzy/go-gitlab/discussions.go | 184 +- .../xanzy/go-gitlab/dockerfile_templates.go | 93 + .../xanzy/go-gitlab/environments.go | 37 +- .../github.com/xanzy/go-gitlab/epic_issues.go | 8 +- vendor/github.com/xanzy/go-gitlab/epics.go | 12 +- .../xanzy/go-gitlab/error_tracking.go | 196 + .../xanzy/go-gitlab/event_parsing.go | 94 +- .../xanzy/go-gitlab/event_systemhook_types.go | 120 +- .../xanzy/go-gitlab/event_webhook_types.go | 577 +- vendor/github.com/xanzy/go-gitlab/events.go | 109 +- .../xanzy/go-gitlab/external_status_checks.go | 113 +- .../xanzy/go-gitlab/feature_flags.go | 14 +- .../xanzy/go-gitlab/freeze_periods.go | 28 +- .../xanzy/go-gitlab/generic_packages.go | 2 +- .../github.com/xanzy/go-gitlab/geo_nodes.go | 14 +- .../xanzy/go-gitlab/gitignore_templates.go | 26 +- vendor/github.com/xanzy/go-gitlab/gitlab.go | 334 +- .../xanzy/go-gitlab/group_access_tokens.go | 41 +- .../xanzy/go-gitlab/group_badges.go | 10 +- .../xanzy/go-gitlab/group_boards.go | 52 +- .../xanzy/go-gitlab/group_clusters.go | 8 +- .../github.com/xanzy/go-gitlab/group_hooks.go | 36 +- .../xanzy/go-gitlab/group_import_export.go | 10 +- .../xanzy/go-gitlab/group_iterations.go | 2 +- .../xanzy/go-gitlab/group_labels.go | 34 +- .../xanzy/go-gitlab/group_members.go | 81 +- .../xanzy/go-gitlab/group_milestones.go | 46 +- .../xanzy/go-gitlab/group_variables.go | 11 +- .../github.com/xanzy/go-gitlab/group_wikis.go | 32 +- vendor/github.com/xanzy/go-gitlab/groups.go | 365 +- .../xanzy/go-gitlab/instance_clusters.go | 8 +- .../xanzy/go-gitlab/instance_variables.go | 11 +- vendor/github.com/xanzy/go-gitlab/invites.go | 17 +- .../github.com/xanzy/go-gitlab/issue_links.go | 84 +- vendor/github.com/xanzy/go-gitlab/issues.go | 274 +- .../xanzy/go-gitlab/issues_statistics.go | 8 +- vendor/github.com/xanzy/go-gitlab/jobs.go | 161 +- vendor/github.com/xanzy/go-gitlab/keys.go | 2 +- vendor/github.com/xanzy/go-gitlab/labels.go | 38 +- vendor/github.com/xanzy/go-gitlab/license.go | 24 +- .../xanzy/go-gitlab/license_templates.go | 16 +- .../go-gitlab/merge_request_approvals.go | 20 +- .../xanzy/go-gitlab/merge_requests.go | 117 +- vendor/github.com/xanzy/go-gitlab/metadata.go | 63 
+ .../github.com/xanzy/go-gitlab/milestones.go | 40 +- .../github.com/xanzy/go-gitlab/namespaces.go | 51 +- vendor/github.com/xanzy/go-gitlab/notes.go | 86 +- .../xanzy/go-gitlab/notifications.go | 30 +- vendor/github.com/xanzy/go-gitlab/packages.go | 57 +- .../xanzy/go-gitlab/pages_domains.go | 43 +- .../xanzy/go-gitlab/personal_access_tokens.go | 136 + .../xanzy/go-gitlab/pipeline_schedules.go | 121 +- .../xanzy/go-gitlab/pipeline_triggers.go | 38 +- .../github.com/xanzy/go-gitlab/pipelines.go | 127 +- .../github.com/xanzy/go-gitlab/plan_limits.go | 4 +- .../xanzy/go-gitlab/project_access_tokens.go | 58 +- .../xanzy/go-gitlab/project_badges.go | 10 +- .../xanzy/go-gitlab/project_clusters.go | 8 +- .../xanzy/go-gitlab/project_feature_flags.go | 240 + .../xanzy/go-gitlab/project_import_export.go | 29 +- .../xanzy/go-gitlab/project_iterations.go | 6 +- .../go-gitlab/project_managed_licenses.go | 8 +- .../xanzy/go-gitlab/project_members.go | 39 +- .../xanzy/go-gitlab/project_mirror.go | 64 +- .../xanzy/go-gitlab/project_snippets.go | 51 +- .../xanzy/go-gitlab/project_templates.go | 110 + .../xanzy/go-gitlab/project_variables.go | 29 +- .../go-gitlab/project_vulnerabilities.go | 150 + vendor/github.com/xanzy/go-gitlab/projects.go | 564 +- .../xanzy/go-gitlab/protected_branches.go | 90 +- .../xanzy/go-gitlab/protected_environments.go | 68 +- .../xanzy/go-gitlab/protected_tags.go | 23 +- .../xanzy/go-gitlab/releaselinks.go | 26 +- vendor/github.com/xanzy/go-gitlab/releases.go | 64 +- .../xanzy/go-gitlab/repositories.go | 129 +- .../xanzy/go-gitlab/repository_files.go | 116 +- .../xanzy/go-gitlab/repository_submodules.go | 2 +- .../xanzy/go-gitlab/request_options.go | 19 + .../xanzy/go-gitlab/resource_label_events.go | 12 +- .../go-gitlab/resource_milestone_events.go | 155 + .../xanzy/go-gitlab/resource_state_events.go | 8 +- .../xanzy/go-gitlab/resource_weight_events.go | 80 + vendor/github.com/xanzy/go-gitlab/runners.go | 235 +- vendor/github.com/xanzy/go-gitlab/search.go | 53 +- vendor/github.com/xanzy/go-gitlab/services.go | 438 +- vendor/github.com/xanzy/go-gitlab/settings.go | 1035 ++- .../xanzy/go-gitlab/sidekiq_metrics.go | 26 +- vendor/github.com/xanzy/go-gitlab/snippets.go | 142 +- vendor/github.com/xanzy/go-gitlab/strings.go | 1 - .../xanzy/go-gitlab/system_hooks.go | 26 +- vendor/github.com/xanzy/go-gitlab/tags.go | 39 +- .../github.com/xanzy/go-gitlab/time_stats.go | 28 +- vendor/github.com/xanzy/go-gitlab/todos.go | 14 +- vendor/github.com/xanzy/go-gitlab/topics.go | 11 +- vendor/github.com/xanzy/go-gitlab/types.go | 91 +- vendor/github.com/xanzy/go-gitlab/users.go | 285 +- vendor/github.com/xanzy/go-gitlab/validate.go | 35 +- vendor/github.com/xanzy/go-gitlab/version.go | 12 +- vendor/github.com/xanzy/go-gitlab/wikis.go | 50 +- .../internal/compile/compile.go | 341 +- .../internal/compile/serial.go | 22 +- vendor/go.starlark.net/resolve/resolve.go | 49 +- vendor/go.starlark.net/starlark/eval.go | 207 +- vendor/go.starlark.net/starlark/hashtable.go | 4 +- vendor/go.starlark.net/starlark/int.go | 294 +- .../go.starlark.net/starlark/int_generic.go | 33 + .../go.starlark.net/starlark/int_posix64.go | 77 + vendor/go.starlark.net/starlark/interp.go | 54 +- vendor/go.starlark.net/starlark/library.go | 417 +- vendor/go.starlark.net/starlark/unpack.go | 135 +- vendor/go.starlark.net/starlark/value.go | 309 +- .../go.starlark.net/starlarkstruct/struct.go | 2 +- vendor/go.starlark.net/syntax/parse.go | 9 +- vendor/go.starlark.net/syntax/quote.go | 202 +- vendor/go.starlark.net/syntax/scan.go 
| 72 +- vendor/go.starlark.net/syntax/syntax.go | 8 +- vendor/golang.org/x/exp/rand/exp.go | 221 - vendor/golang.org/x/exp/rand/normal.go | 156 - vendor/golang.org/x/exp/rand/rand.go | 372 - vendor/golang.org/x/exp/rand/rng.go | 91 - vendor/golang.org/x/exp/rand/zipf.go | 77 - vendor/golang.org/x/sys/unix/aliases.go | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 39 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 12 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 99 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 90 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 3 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../golang.org/x/sys/unix/zsyscall_linux.go | 10 + .../x/sys/unix/zsyscall_openbsd_386.go | 2 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 - .../x/sys/unix/zsyscall_openbsd_arm.go | 2 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 2 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 2 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2 - .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 3 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 185 +- .../golang.org/x/sys/windows/env_windows.go | 17 +- .../x/sys/windows/syscall_windows.go | 4 +- .../x/sys/windows/zsyscall_windows.go | 9 + .../protobuf/encoding/protojson/decode.go | 38 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../protobuf/encoding/protojson/encode.go | 39 +- .../encoding/protojson/well_known_types.go | 59 +- .../protobuf/encoding/prototext/decode.go | 8 +- .../protobuf/encoding/prototext/encode.go | 4 +- .../protobuf/encoding/protowire/wire.go | 28 +- .../protobuf/internal/descfmt/stringer.go | 183 +- .../internal/editiondefaults/defaults.go | 12 + .../editiondefaults/editions_defaults.binpb | 4 + .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/filedesc/desc.go | 102 +- .../protobuf/internal/filedesc/desc_init.go | 52 + .../protobuf/internal/filedesc/desc_lazy.go | 28 + .../protobuf/internal/filedesc/editions.go | 142 + .../protobuf/internal/genid/descriptor_gen.go | 364 +- .../internal/genid/go_features_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 5 + .../protobuf/internal/genid/type_gen.go | 38 + .../protobuf/internal/impl/codec_extension.go | 22 +- .../protobuf/internal/impl/codec_gen.go | 113 +- 
.../protobuf/internal/impl/codec_tables.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 19 +- .../protobuf/internal/impl/message.go | 17 +- .../internal/impl/message_reflect_field.go | 2 +- .../protobuf/internal/impl/pointer_reflect.go | 36 + .../protobuf/internal/impl/pointer_unsafe.go | 40 + .../protobuf/internal/strs/strings.go | 2 +- ...ings_unsafe.go => strings_unsafe_go120.go} | 4 +- .../internal/strs/strings_unsafe_go121.go | 74 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 2 +- .../google.golang.org/protobuf/proto/doc.go | 58 +- .../protobuf/proto/encode.go | 2 +- .../protobuf/proto/extension.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 2 +- .../google.golang.org/protobuf/proto/proto.go | 18 +- .../protobuf/reflect/protodesc/desc.go | 29 +- .../protobuf/reflect/protodesc/desc_init.go | 56 + .../reflect/protodesc/desc_resolve.go | 4 +- .../reflect/protodesc/desc_validate.go | 6 +- .../protobuf/reflect/protodesc/editions.go | 148 + .../protobuf/reflect/protodesc/proto.go | 18 +- .../protobuf/reflect/protoreflect/proto.go | 85 +- .../reflect/protoreflect/source_gen.go | 64 +- .../protobuf/reflect/protoreflect/type.go | 44 +- .../protobuf/reflect/protoreflect/value.go | 24 +- .../reflect/protoreflect/value_equal.go | 8 +- .../reflect/protoreflect/value_union.go | 44 +- ...{value_unsafe.go => value_unsafe_go120.go} | 4 +- .../protoreflect/value_unsafe_go121.go | 87 + .../reflect/protoregistry/registry.go | 24 +- .../types/descriptorpb/descriptor.pb.go | 2475 ++++-- .../types/gofeaturespb/go_features.pb.go | 177 + .../types/gofeaturespb/go_features.proto | 28 + .../protobuf/types/known/anypb/any.pb.go | 3 +- vendor/modules.txt | 120 +- vendor/oras.land/oras-go/v2/LICENSE | 201 + .../oras-go/v2/content/descriptor.go | 40 + vendor/oras.land/oras-go/v2/content/graph.go | 107 + .../oras-go/v2/content/limitedstorage.go | 50 + vendor/oras.land/oras-go/v2/content/reader.go | 141 + .../oras.land/oras-go/v2/content/resolver.go | 41 + .../oras.land/oras-go/v2/content/storage.go | 80 + vendor/oras.land/oras-go/v2/errdef/errors.go | 30 + .../oras-go/v2/internal/cas/memory.go | 88 + .../oras-go/v2/internal/cas/proxy.go | 125 + .../v2/internal/descriptor/descriptor.go | 89 + .../oras-go/v2/internal/docker/mediatype.go | 24 + .../oras-go/v2/internal/httputil/seek.go | 116 + .../oras-go/v2/internal/ioutil/io.go | 58 + .../oras-go/v2/internal/registryutil/auth.go | 29 + .../oras-go/v2/internal/registryutil/proxy.go | 102 + .../oras-go/v2/internal/slices/slice.go | 24 + .../oras-go/v2/internal/spec/artifact.go | 49 + .../oras-go/v2/internal/syncutil/limit.go | 84 + .../v2/internal/syncutil/limitgroup.go | 67 + .../oras-go/v2/internal/syncutil/merge.go | 140 + .../oras-go/v2/internal/syncutil/once.go | 70 + .../oras-go/v2/internal/syncutil/pool.go | 64 + .../oras-go/v2/registry/reference.go | 269 + .../oras.land/oras-go/v2/registry/registry.go | 52 + .../oras-go/v2/registry/remote/auth/cache.go | 159 + .../v2/registry/remote/auth/challenge.go | 167 + .../oras-go/v2/registry/remote/auth/client.go | 419 + .../v2/registry/remote/auth/credential.go | 40 + .../oras-go/v2/registry/remote/auth/scope.go | 236 + .../v2/registry/remote/errcode/errors.go | 128 + .../remote/internal/errutil/errutil.go | 54 + .../oras-go/v2/registry/remote/manifest.go | 59 + .../oras-go/v2/registry/remote/referrers.go | 224 + .../oras-go/v2/registry/remote/registry.go | 175 + .../oras-go/v2/registry/remote/repository.go | 1533 ++++ .../v2/registry/remote/retry/client.go | 114 + 
.../v2/registry/remote/retry/policy.go | 154 + .../oras-go/v2/registry/remote/url.go | 119 + .../oras-go/v2/registry/remote/utils.go | 94 + .../oras-go/v2/registry/repository.go | 133 + 694 files changed, 59243 insertions(+), 21385 deletions(-) create mode 100644 vendor/github.com/Masterminds/semver/v3/SECURITY.md delete mode 100644 vendor/github.com/Masterminds/semver/v3/fuzz.go delete mode 100644 vendor/github.com/antonmedv/expr/.travis.yml create mode 100644 vendor/github.com/antonmedv/expr/builtin/builtin.go delete mode 100644 vendor/github.com/antonmedv/expr/compiler/patcher.go create mode 100644 vendor/github.com/antonmedv/expr/conf/functions.go rename vendor/github.com/antonmedv/expr/conf/{operators_table.go => operators.go} (59%) create mode 100644 vendor/github.com/antonmedv/expr/vm/generated.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/helpers.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/runtime.go create mode 100644 vendor/github.com/antonmedv/expr/vm/runtime/generated.go create mode 100644 vendor/github.com/antonmedv/expr/vm/runtime/runtime.go create mode 100644 vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go create mode 100644 vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go delete mode 100644 vendor/github.com/argoproj/argo-cd/v2/util/text/text.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/.gitignore create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/LICENSE create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/README.md create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/doublestar.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/glob.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/globoptions.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/globwalk.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/match.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/utils.go create mode 100644 vendor/github.com/bmatcuk/doublestar/v4/validate.go create mode 100644 vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go create mode 100644 vendor/github.com/cloudflare/circl/math/primes.go delete mode 100644 vendor/github.com/go-errors/errors/cover.out create mode 100644 vendor/github.com/go-errors/errors/error_1_13.go create mode 100644 vendor/github.com/go-errors/errors/error_backward.go rename vendor/github.com/go-redis/{redis/v8 => cache/v9}/.prettierrc.yml (100%) rename vendor/github.com/go-redis/cache/{v8 => v9}/CHANGELOG.md (100%) rename vendor/github.com/go-redis/cache/{v8 => v9}/LICENSE (100%) rename vendor/github.com/go-redis/cache/{v8 => v9}/Makefile (100%) rename vendor/github.com/go-redis/cache/{v8 => v9}/README.md (67%) rename vendor/github.com/go-redis/cache/{v8 => v9}/cache.go (98%) rename vendor/github.com/go-redis/cache/{v8 => v9}/local.go (93%) delete mode 100644 vendor/github.com/go-redis/redis/v8/CHANGELOG.md delete mode 100644 vendor/github.com/go-redis/redis/v8/Makefile delete mode 100644 vendor/github.com/go-redis/redis/v8/command.go delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/proto/reader.go delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/safe.go delete mode 100644 vendor/github.com/go-redis/redis/v8/internal/unsafe.go delete mode 100644 vendor/github.com/go-redis/redis/v8/redis.go delete mode 100644 
vendor/github.com/google/btree/.travis.yml create mode 100644 vendor/github.com/google/btree/btree_generic.go create mode 100644 vendor/github.com/google/gnostic/openapiv3/annotations.pb.go create mode 100644 vendor/github.com/google/gnostic/openapiv3/annotations.proto delete mode 100644 vendor/github.com/google/go-github/v45/github/orgs_audit_log.go delete mode 100644 vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go rename vendor/github.com/google/go-github/{v45 => v53}/AUTHORS (82%) rename vendor/github.com/google/go-github/{v45 => v53}/LICENSE (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/actions.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_artifacts.go (75%) create mode 100644 vendor/github.com/google/go-github/v53/github/actions_cache.go create mode 100644 vendor/github.com/google/go-github/v53/github/actions_oidc.go create mode 100644 vendor/github.com/google/go-github/v53/github/actions_required_workflows.go rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_runner_groups.go (87%) rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_runners.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_secrets.go (99%) create mode 100644 vendor/github.com/google/go-github/v53/github/actions_variables.go rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_workflow_jobs.go (95%) rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_workflow_runs.go (80%) rename vendor/github.com/google/go-github/{v45 => v53}/github/actions_workflows.go (96%) rename vendor/github.com/google/go-github/{v45 => v53}/github/activity.go (83%) rename vendor/github.com/google/go-github/{v45 => v53}/github/activity_events.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/activity_notifications.go (96%) rename vendor/github.com/google/go-github/{v45 => v53}/github/activity_star.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/activity_watching.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/admin.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/admin_orgs.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/admin_stats.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/admin_users.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/apps.go (90%) rename vendor/github.com/google/go-github/{v45 => v53}/github/apps_hooks.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/apps_hooks_deliveries.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/apps_installation.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/apps_manifest.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/apps_marketplace.go (91%) rename vendor/github.com/google/go-github/{v45 => v53}/github/authorizations.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/billing.go (87%) rename vendor/github.com/google/go-github/{v45 => v53}/github/checks.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/code-scanning.go (86%) rename vendor/github.com/google/go-github/{v45 => v53}/github/dependabot.go (100%) create mode 100644 vendor/github.com/google/go-github/v53/github/dependabot_alerts.go rename vendor/github.com/google/go-github/{v45 => v53}/github/dependabot_secrets.go (86%) rename vendor/github.com/google/go-github/{v45 
=> v53}/github/doc.go (97%) rename vendor/github.com/google/go-github/{v45 => v53}/github/enterprise.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/enterprise_actions_runners.go (75%) rename vendor/github.com/google/go-github/{v45 => v53}/github/enterprise_audit_log.go (100%) create mode 100644 vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go rename vendor/github.com/google/go-github/{v45 => v53}/github/event.go (93%) rename vendor/github.com/google/go-github/{v45 => v53}/github/event_types.go (92%) rename vendor/github.com/google/go-github/{v45 => v53}/github/gists.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/gists_comments.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/git.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/git_blobs.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/git_commits.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/git_refs.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/git_tags.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/git_trees.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/github-accessors.go (87%) rename vendor/github.com/google/go-github/{v45 => v53}/github/github.go (83%) rename vendor/github.com/google/go-github/{v45 => v53}/github/gitignore.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/interactions.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/interactions_orgs.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/interactions_repos.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issue_import.go (91%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues.go (93%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues_assignees.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues_comments.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues_events.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues_labels.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues_milestones.go (95%) rename vendor/github.com/google/go-github/{v45 => v53}/github/issues_timeline.go (95%) rename vendor/github.com/google/go-github/{v45 => v53}/github/licenses.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/messages.go (85%) rename vendor/github.com/google/go-github/{v45 => v53}/github/migrations.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/migrations_source_import.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/migrations_user.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/misc.go (96%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs.go (83%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_actions_allowed.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_actions_permissions.go (100%) create mode 100644 vendor/github.com/google/go-github/v53/github/orgs_audit_log.go create mode 100644 vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_hooks.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_hooks_deliveries.go (100%) rename 
vendor/github.com/google/go-github/{v45 => v53}/github/orgs_members.go (99%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_outside_collaborators.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_packages.go (97%) rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_projects.go (100%) create mode 100644 vendor/github.com/google/go-github/v53/github/orgs_security_managers.go rename vendor/github.com/google/go-github/{v45 => v53}/github/orgs_users_blocking.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/packages.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/projects.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/pulls.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/pulls_comments.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/pulls_reviewers.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/pulls_reviews.go (94%) rename vendor/github.com/google/go-github/{v45 => v53}/github/pulls_threads.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/reactions.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos.go (76%) create mode 100644 vendor/github.com/google/go-github/v53/github/repos_actions_access.go rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_actions_allowed.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_actions_permissions.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_autolinks.go (89%) create mode 100644 vendor/github.com/google/go-github/v53/github/repos_codeowners.go rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_collaborators.go (95%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_comments.go (97%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_commits.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_community_health.go (96%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_contents.go (98%) create mode 100644 vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_deployments.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_environments.go (77%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_forks.go (92%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_hooks.go (77%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_hooks_deliveries.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_invitations.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_keys.go (100%) create mode 100644 vendor/github.com/google/go-github/v53/github/repos_lfs.go rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_merging.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_pages.go (67%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_prereceive_hooks.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_projects.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_releases.go (95%) rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_stats.go (100%) rename vendor/github.com/google/go-github/{v45 => 
v53}/github/repos_statuses.go (97%) create mode 100644 vendor/github.com/google/go-github/v53/github/repos_tags.go rename vendor/github.com/google/go-github/{v45 => v53}/github/repos_traffic.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/scim.go (78%) rename vendor/github.com/google/go-github/{v45 => v53}/github/search.go (93%) rename vendor/github.com/google/go-github/{v45 => v53}/github/secret_scanning.go (94%) rename vendor/github.com/google/go-github/{v45 => v53}/github/strings.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/teams.go (97%) rename vendor/github.com/google/go-github/{v45 => v53}/github/teams_discussion_comments.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/teams_discussions.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/teams_members.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/timestamp.go (90%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users.go (98%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_administration.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_blocking.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_emails.go (73%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_followers.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_gpg_keys.go (96%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_keys.go (96%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_packages.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/users_projects.go (100%) create mode 100644 vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go rename vendor/github.com/google/go-github/{v45 => v53}/github/with_appengine.go (100%) rename vendor/github.com/google/go-github/{v45 => v53}/github/without_appengine.go (100%) delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS create mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md create mode 100644 vendor/github.com/imdario/mergo/SECURITY.md create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vnext.go create mode 100644 vendor/github.com/prometheus/common/model/value_float.go create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 
vendor/github.com/prometheus/common/model/value_type.go rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/.gitignore (52%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/.golangci.yml (100%) create mode 100644 vendor/github.com/redis/go-redis/v9/.prettierrc.yml create mode 100644 vendor/github.com/redis/go-redis/v9/CHANGELOG.md rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/LICENSE (95%) create mode 100644 vendor/github.com/redis/go-redis/v9/Makefile rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/README.md (55%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/RELEASING.md (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/cluster.go (74%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/cluster_commands.go (85%) create mode 100644 vendor/github.com/redis/go-redis/v9/command.go rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/commands.go (79%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/doc.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/error.go (86%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/arg.go (92%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/hashtag/hashtag.go (98%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/hscan/hscan.go (96%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/hscan/structmap.go (75%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/internal.go (89%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/log.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/once.go (94%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/pool/conn.go (82%) create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go create mode 100644 vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/pool/pool.go (72%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/pool/pool_single.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/pool/pool_sticky.go (100%) create mode 100644 vendor/github.com/redis/go-redis/v9/internal/proto/reader.go rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/proto/scan.go (97%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/proto/writer.go (92%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/rand/rand.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/util.go (92%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/util/safe.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/util/strconv.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/internal/util/unsafe.go (100%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/iterator.go (84%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/options.go (58%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/package.json (64%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/pipeline.go (74%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/pubsub.go (86%) create 
mode 100644 vendor/github.com/redis/go-redis/v9/redis.go rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/result.go (87%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/ring.go (61%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/script.go (64%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/sentinel.go (69%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/tx.go (79%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/universal.go (67%) rename vendor/github.com/{go-redis/redis/v8 => redis/go-redis/v9}/version.go (83%) create mode 100644 vendor/github.com/xanzy/go-gitlab/CONTRIBUTING.md create mode 100644 vendor/github.com/xanzy/go-gitlab/Makefile create mode 100644 vendor/github.com/xanzy/go-gitlab/cluster_agents.go create mode 100644 vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go create mode 100644 vendor/github.com/xanzy/go-gitlab/dockerfile_templates.go create mode 100644 vendor/github.com/xanzy/go-gitlab/error_tracking.go create mode 100644 vendor/github.com/xanzy/go-gitlab/metadata.go create mode 100644 vendor/github.com/xanzy/go-gitlab/personal_access_tokens.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_feature_flags.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_templates.go create mode 100644 vendor/github.com/xanzy/go-gitlab/project_vulnerabilities.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go create mode 100644 vendor/github.com/xanzy/go-gitlab/resource_weight_events.go create mode 100644 vendor/go.starlark.net/starlark/int_generic.go create mode 100644 vendor/go.starlark.net/starlark/int_posix64.go delete mode 100644 vendor/golang.org/x/exp/rand/exp.go delete mode 100644 vendor/golang.org/x/exp/rand/normal.go delete mode 100644 vendor/golang.org/x/exp/rand/rand.go delete mode 100644 vendor/golang.org/x/exp/rand/rng.go delete mode 100644 vendor/golang.org/x/exp/rand/zipf.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go create mode 100644 vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/editions.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%) create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%) create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto create mode 100644 vendor/oras.land/oras-go/v2/LICENSE create mode 100644 vendor/oras.land/oras-go/v2/content/descriptor.go create mode 100644 vendor/oras.land/oras-go/v2/content/graph.go create mode 100644 vendor/oras.land/oras-go/v2/content/limitedstorage.go create mode 100644 vendor/oras.land/oras-go/v2/content/reader.go create mode 100644 vendor/oras.land/oras-go/v2/content/resolver.go create mode 100644 vendor/oras.land/oras-go/v2/content/storage.go create mode 100644 
vendor/oras.land/oras-go/v2/errdef/errors.go create mode 100644 vendor/oras.land/oras-go/v2/internal/cas/memory.go create mode 100644 vendor/oras.land/oras-go/v2/internal/cas/proxy.go create mode 100644 vendor/oras.land/oras-go/v2/internal/descriptor/descriptor.go create mode 100644 vendor/oras.land/oras-go/v2/internal/docker/mediatype.go create mode 100644 vendor/oras.land/oras-go/v2/internal/httputil/seek.go create mode 100644 vendor/oras.land/oras-go/v2/internal/ioutil/io.go create mode 100644 vendor/oras.land/oras-go/v2/internal/registryutil/auth.go create mode 100644 vendor/oras.land/oras-go/v2/internal/registryutil/proxy.go create mode 100644 vendor/oras.land/oras-go/v2/internal/slices/slice.go create mode 100644 vendor/oras.land/oras-go/v2/internal/spec/artifact.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/limit.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/limitgroup.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/merge.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/once.go create mode 100644 vendor/oras.land/oras-go/v2/internal/syncutil/pool.go create mode 100644 vendor/oras.land/oras-go/v2/registry/reference.go create mode 100644 vendor/oras.land/oras-go/v2/registry/registry.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/auth/cache.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/auth/challenge.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/auth/client.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/auth/credential.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/auth/scope.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/internal/errutil/errutil.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/manifest.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/referrers.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/registry.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/repository.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/retry/client.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/retry/policy.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/url.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/utils.go create mode 100644 vendor/oras.land/oras-go/v2/registry/repository.go diff --git a/api/restHandler/ExternalCiRestHandler.go b/api/restHandler/ExternalCiRestHandler.go index 7385d3d6af..f1ab9a2d73 100644 --- a/api/restHandler/ExternalCiRestHandler.go +++ b/api/restHandler/ExternalCiRestHandler.go @@ -19,8 +19,8 @@ package restHandler import ( "encoding/json" - "github.com/devtron-labs/devtron/pkg/workflow/dag" util3 "github.com/devtron-labs/devtron/api/util" + "github.com/devtron-labs/devtron/pkg/workflow/dag" "net/http" "strconv" diff --git a/go.mod b/go.mod index 2cb7645d2a..0a479441dd 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/Pallinder/go-randomdata v1.2.0 - github.com/argoproj/argo-cd/v2 v2.6.15 + github.com/argoproj/argo-cd/v2 v2.8.13 github.com/argoproj/argo-workflows/v3 v3.4.3 github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 github.com/aws/aws-sdk-go v1.44.290 @@ -16,9 +16,9 @@ require ( github.com/coreos/go-oidc v2.2.1+incompatible github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 - 
github.com/devtron-labs/authenticator v0.4.35-0.20240216091211-80e10a80ce7b - github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538 - github.com/devtron-labs/protos v0.0.3-0.20240130061723-7b2e12ab0abb + github.com/devtron-labs/authenticator v0.4.35-0.20240321064236-a95dc8d0403f + github.com/devtron-labs/common-lib v0.0.16-0.20240326053557-bff4518a731d + github.com/devtron-labs/protos v0.0.3-0.20240326053929-48e42d9d4534 github.com/evanphx/json-patch v5.6.0+incompatible github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-git/go-billy/v5 v5.5.0 @@ -28,7 +28,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/google/go-cmp v0.6.0 github.com/google/go-github v17.0.0+incompatible github.com/google/uuid v1.3.1 @@ -42,20 +42,20 @@ require ( github.com/hashicorp/hcl2 v0.0.0-20191002203319-fb75b3253c80 github.com/invopop/jsonschema v0.7.0 github.com/juju/errors v0.0.0-20200330140219-3fe23663418f - github.com/ktrysmt/go-bitbucket v0.9.55 + github.com/ktrysmt/go-bitbucket v0.9.60 github.com/lib/pq v1.10.4 github.com/microsoft/azure-devops-go-api/azuredevops v1.0.0-b5 github.com/otiai10/copy v1.0.2 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/posthog/posthog-go v0.0.0-20210610161230-cd4408afb35a - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.16.0 github.com/robfig/cron/v3 v3.0.1 github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.8.4 github.com/tidwall/gjson v1.14.3 github.com/tidwall/sjson v1.2.4 - github.com/xanzy/go-gitlab v0.60.0 + github.com/xanzy/go-gitlab v0.86.0 github.com/xeipuuv/gojsonschema v1.2.0 github.com/yannh/kubeconform v0.5.0 github.com/zclconf/go-cty v1.13.2 @@ -71,7 +71,7 @@ require ( golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 golang.org/x/oauth2 v0.11.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 gopkg.in/go-playground/validator.v9 v9.30.0 gopkg.in/igm/sockjs-go.v3 v3.0.0 gopkg.in/yaml.v2 v2.4.0 @@ -107,24 +107,25 @@ require ( github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/agext/levenshtein v1.2.1 // indirect - github.com/antonmedv/expr v1.9.0 // indirect + github.com/antonmedv/expr v1.12.5 // indirect github.com/apparentlymart/go-textseg v1.0.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e // indirect github.com/aws/smithy-go v1.14.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect github.com/bombsimon/logrusr/v2 v2.0.1 // indirect - github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/cloudflare/circl v1.3.3 // indirect 
+ github.com/cloudflare/circl v1.3.7 // indirect github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -136,7 +137,7 @@ require ( github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fvbommel/sortorder v1.0.1 // indirect - github.com/go-errors/errors v1.0.1 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -145,15 +146,14 @@ require ( github.com/go-openapi/swag v0.22.3 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect - github.com/go-redis/cache/v8 v8.4.2 // indirect - github.com/go-redis/redis/v8 v8.11.5 // indirect + github.com/go-redis/cache/v9 v9.0.0 // indirect github.com/go-sql-driver/mysql v1.6.0 // indirect github.com/go-xorm/xorm v0.7.9 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.0.1 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-github/v45 v45.2.0 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-github/v53 v53.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.4 // indirect @@ -163,16 +163,16 @@ require ( github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/huandu/xstrings v1.3.3 // indirect github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect github.com/igm/sockjs-go v3.0.0+incompatible // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jcmturner/gofork v1.0.0 // indirect @@ -208,13 +208,15 @@ require ( github.com/nats-io/nuid v1.0.1 // indirect github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc.3 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.38.0 // indirect + github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect + 
github.com/redis/go-redis/v9 v9.0.5 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect @@ -228,7 +230,7 @@ require ( github.com/tidwall/pretty v1.2.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect - github.com/vmihailenco/go-tinylfu v0.2.1 // indirect + github.com/vmihailenco/go-tinylfu v0.2.2 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect @@ -240,13 +242,13 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect go.opentelemetry.io/otel/metric v1.20.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect @@ -276,6 +278,7 @@ require ( k8s.io/kube-aggregator v0.26.4 // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect mellium.im/sasl v0.3.1 // indirect + oras.land/oras-go/v2 v2.2.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.12.1 // indirect sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect diff --git a/go.sum b/go.sum index 4eeaa4cbdb..9bd1fe1ecf 100644 --- a/go.sum +++ b/go.sum @@ -24,6 +24,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -44,7 +45,6 @@ cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7Biccwk dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.12.0 h1:7bFXA1QB+lOK2/ASWHhp6/vnxjaeeZq6t8w1Jyp0Iaw= @@ -71,7 +71,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -80,63 +79,54 @@ github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJ github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Pallinder/go-randomdata v1.2.0 h1:DZ41wBchNRb/0GfsePLiSwb0PHZmT67XY00lCDlaYPg= github.com/Pallinder/go-randomdata v1.2.0/go.mod h1:yHmJgulpD2Nfrm0cR9tI/+oAgRqCQQixsA8HyRZfV9Y= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template 
v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= -github.com/alicebob/miniredis/v2 v2.23.1 h1:jR6wZggBxwWygeXcdNyguCOCIjPsZyNUNlAkTx2fu0U= +github.com/alicebob/miniredis/v2 v2.30.3 h1:hrqDB4cHFSHQf4gO3xu6YKQg8PqJpNjLYsQAFYHstqw= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.9.0 h1:j4HI3NHEdgDnN9p6oI6Ndr0G5QryMY0FNxT4ONrFDGU= -github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E= +github.com/antonmedv/expr v1.12.5/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/argoproj/argo-cd/v2 v2.6.15 h1:pc8h1ggF/gx9y08w+XAhS7Jfcxj0DJwkaDe2azr5Vqg= -github.com/argoproj/argo-cd/v2 v2.6.15/go.mod h1:xaf087ms1hB9L+BHcXOP9+tztCo3/J9yzRpKBP5RXnw= +github.com/argoproj/argo-cd/v2 v2.8.13 h1:DrKWdEGVOwiyC4zgS7vB9ngP16xumA6x5/AgN/stJw0= +github.com/argoproj/argo-cd/v2 v2.8.13/go.mod h1:3GmH9xEJtiWZNfZ5hx1qdzbF2t9gIS/3vNV5vJBmI/g= github.com/argoproj/argo-workflows/v3 v3.4.3 h1:4pt7+Rjy9Lzq/r6dWp6wL8mr3ucPHSsGIlWwoP3fueM= github.com/argoproj/argo-workflows/v3 v3.4.3/go.mod h1:Od1rQK5j9/WefqFaUsIwAqTialDhLlhups0RE/WYzz4= github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 h1:oTaLRbCwjnGtScIX2ZRdIEDsiDxonwh9/BbUxdXrjYc= github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814/go.mod h1:1TchqKw9XmYYZluyEHa1dTJQoZgbV6PhabB/e8Wf3KY= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e h1:kuLQvJqwwRMQTheT4MFyKVM8Txncu21CHT4yBWUl1Mk= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e/go.mod h1:xBN5PLx2MoK63dmPfMo/PGBvd77K1Y0m/rzZOe4cs1s= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod 
h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.44.290 h1:Md4+os9DQtJjow0lWLMzeJljsimD+XS2xwwHDr5Z+Lk= github.com/aws/aws-sdk-go v1.44.290/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= @@ -147,17 +137,21 @@ github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvzIZhEXc= +github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bombsimon/logrusr/v2 v2.0.1 h1:1VgxVNQMCvjirZIYaT9JYn6sAVGVEcNtRE0y4mvaOAM= github.com/bombsimon/logrusr/v2 v2.0.1/go.mod h1:ByVAX+vHdLGAfdroiMg6q0zgq2FODY2lc5YJvzmOJio= -github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA= -github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU= +github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 h1:yaYcGQ7yEIGbsJfW/9z7v1sLiZg/5rSNNXwmMct5XaE= +github.com/bradleyfalzon/ghinstallation/v2 v2.5.0/go.mod h1:amcvPQMrRkWNdueWOjPytGL25xQGzox7425qMgzo+Vo= +github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= +github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/caarlos0/env v3.5.0+incompatible h1:Yy0UN8o9Wtr/jGHZDpCBLpNrzcFLLM2yixi/rBrKyJs= github.com/caarlos0/env v3.5.0+incompatible/go.mod h1:tdCsowwCzMLdkqRYDlHpZCp2UooDD3MspDBjZ2AD02Y= @@ -165,15 +159,13 @@ github.com/caarlos0/env/v6 v6.7.2 h1:Jiy2dBHvNgCfNGMP0hOZW6jHUbiENvP+VWDtLz4n1Kg github.com/caarlos0/env/v6 v6.7.2/go.mod 
h1:FE0jGiAnQqtv2TenJ4KTa8+/T2Ss8kdS5s1VEjasoN0= github.com/casbin/casbin v1.9.1 h1:ucjbS5zTrmSLtH4XogqOG920Poe6QatdXtz1FEbApeM= github.com/casbin/casbin v1.9.1/go.mod h1:z8uPsfBJGUsnkagrt3G8QvjgTKFMBJ32UP8HpZllfog= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007 h1:KEBrEhQjSCzUt5bQKxX8ZbS3S46sRnzOmwemTOu+LLQ= github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007/go.mod h1:6sy40UQdWR0blO1DJdEzbcu6rcEW89odCMcEdoB1qdM= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -181,37 +173,31 @@ github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHe github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= 
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31 h1:ow7T77012NSZVW0uOWoQxz3yj9fHKYeZ4QmNrMtWMbM= github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31/go.mod h1:vSBumefK4HA5uiRSwNP+3ofgrEoScpCS2MMWcWXEuQ4= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -219,12 +205,12 @@ github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsP github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/devtron-labs/authenticator v0.4.35-0.20240216091211-80e10a80ce7b h1:aHKsdB4ghsp+x8167W8MZyF3WQOixTSJFyARrU+qR6s= -github.com/devtron-labs/authenticator v0.4.35-0.20240216091211-80e10a80ce7b/go.mod h1:a5gxST+HNmJReXE2TkCicFQFWtlhp8eqBRwS23GydNE= -github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538 h1:KG/XRlhT3Mc066fE5qOk02kybqtnWTIsJhsUZ3gzDHc= -github.com/devtron-labs/common-lib v0.0.16-0.20240320102218-5807b1301538/go.mod h1:95/DizzVXu1kHap/VwEvdxwgd+BvPVYc0bJzt8yqGDU= -github.com/devtron-labs/protos v0.0.3-0.20240130061723-7b2e12ab0abb h1:CkfQQgZc950/hTPqtQSiHV2RmZgkBLGCzwR02FZYjAU= -github.com/devtron-labs/protos v0.0.3-0.20240130061723-7b2e12ab0abb/go.mod h1:pjLjgoa1GzbkOkvbMyP4SAKsaiK7eG6GoQCNauG03JA= +github.com/devtron-labs/authenticator v0.4.35-0.20240321064236-a95dc8d0403f h1:TPpMSx7NZUo+CX4CBfc39q0dak8OurrQnymTupJkE0Y= 
+github.com/devtron-labs/authenticator v0.4.35-0.20240321064236-a95dc8d0403f/go.mod h1:JQxTCMmQisrpjzETJr0tzVadV+wW23rHEZAY7JVyK3s= +github.com/devtron-labs/common-lib v0.0.16-0.20240326053557-bff4518a731d h1:YMJPqkVb11zU6QXMovLGR3xuzV7D8Hv7fIYEcML/FUw= +github.com/devtron-labs/common-lib v0.0.16-0.20240326053557-bff4518a731d/go.mod h1:6QrrtLoLnuEdHkrx5HqkEVwMxEmnhAH1R/SJVMswuCc= +github.com/devtron-labs/protos v0.0.3-0.20240326053929-48e42d9d4534 h1:TElPRU69QedW7DIQiiQxtjwSQ6cK0fCTAMGvSLhP0ac= +github.com/devtron-labs/protos v0.0.3-0.20240326053929-48e42d9d4534/go.mod h1:ypUknVph8Ph4dxSlrFoouf7wLedQxHku2LQwgRrdgS4= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= @@ -233,12 +219,10 @@ github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3 h1:7nllYTGLnq4CqBL27lV6oNfXzM2tJ2mrKF8E+aBXOV0= github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3/go.mod h1:v/MTKot4he5oRHGirOYGN4/hEOONNnWtDBLAzllSGMw= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -248,11 +232,11 @@ github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhF github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod 
h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= @@ -263,27 +247,23 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwC github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= -github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= @@ -295,10 +275,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit 
v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -330,14 +308,10 @@ github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-redis/cache/v8 v8.4.2 h1:8YbsmnU1Ws3TKS6T+qALzYE/MlGE+A/lrlx1XTA3p6M= -github.com/go-redis/cache/v8 v8.4.2/go.mod h1:X7Jjd69Ssbrf3xBQLtIDE0g3WcSbFoQiSGeb8QfEJ+g= -github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-redis/cache/v9 v9.0.0 h1:0thdtFo0xJi0/WXbRVu8B066z8OvVymXTJGaXrVWnN0= +github.com/go-redis/cache/v9 v9.0.0/go.mod h1:cMwi1N8ASBOufbIvk7cdXe2PbPjK/WMRL95FFHWsSgI= github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -352,19 +326,15 @@ github.com/go-xorm/xorm v0.7.9/go.mod h1:XiVxrMMIhFkwSkh96BW7PACl7UhLtx2iJIHMdmj github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -380,7 +350,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -398,15 +367,17 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -417,15 +388,14 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= -github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= +github.com/google/go-github/v53 v53.0.0 h1:T1RyHbSnpHYnoF0ZYKiIPSgPtuJ8G6vgc0MKodXsQDQ= +github.com/google/go-github/v53 v53.0.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -450,7 +420,6 @@ github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkj github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -464,10 +433,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/schema v1.1.0 h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY= @@ -476,63 +443,42 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= 
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 
v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1486,11 +1326,9 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v1 v1.0.0-20161222125816-442357a80af5/go.mod h1:u0ALmqvLRxLI95fkdCEWrE6mhWYZW1aMOJHp5YXLHTg= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.30.0 h1:Wk0Z37oBmKj9/n+tPyBHZmeL19LaCoK3Qq48VwYENss= @@ -1514,7 +1352,6 @@ gopkg.in/jcmturner/rpc.v0 v0.0.2/go.mod h1:NzMq6cRzR9lipgw7WxRBHNx5N8SifBuaCQsOT gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1523,7 +1360,6 @@ gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170712054546-1be3d31502d6/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1535,7 +1371,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1548,7 +1383,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= k8s.io/api v0.26.4 h1:qSG2PmtcD23BkYiWfoYAcak870eF/hE7NNYBYavTT94= k8s.io/api v0.26.4/go.mod h1:WwKEXU3R1rgCZ77AYa7DFksd9/BAIKyOmRlbVxgvjCk= @@ -1597,6 +1431,8 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80 launchpad.net/xmlpath v0.0.0-20130614043138-000000000004/go.mod h1:vqyExLOM3qBx7mvYRkoxjSCF945s0mbe7YynlKYXtsA= mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= +oras.land/oras-go/v2 v2.2.0 h1:E1fqITD56Eg5neZbxBtAdZVgDHD6wBabJo6xESTcQyo= +oras.land/oras-go/v2 v2.2.0/go.mod h1:pXjn0+KfarspMHHNR3A56j3tgvr+mxArHuI8qVn59v8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1611,11 +1447,9 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= upper.io/db.v3 v3.8.0+incompatible h1:XNeEO2vQRVqq70M98ghzq6M30F5Bzo+99ess5v+eVYw= upper.io/db.v3 v3.8.0+incompatible/go.mod h1:FgTdD24eBjJAbPKsQSiHUNgXjOR4Lub3u1UMHSIh82Y= xorm.io/builder v0.3.6 h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8= diff --git a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go index cb4e605fff..9c1e49c19d 100644 --- a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go +++ b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go @@ -16,6 +16,8 @@ import ( "github.com/google/go-github/github" "github.com/microsoft/azure-devops-go-api/azuredevops" "github.com/xanzy/go-gitlab" + + //"github.com/xanzy/go-gitlab" "net/http" "regexp" ) diff --git 
a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml index c87d1c4b90..fbc6332592 100644 --- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -5,12 +5,9 @@ linters: disable-all: true enable: - misspell - - structcheck - govet - staticcheck - - deadcode - errcheck - - varcheck - unparam - ineffassign - nakedret diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile index eac19178fb..0e7b5c7138 100644 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -1,7 +1,5 @@ GOPATH=$(shell go env GOPATH) GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint -GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -GOFUZZ = $(GOPATH)/bin/go-fuzz .PHONY: lint lint: $(GOLANGCI_LINT) @@ -19,19 +17,14 @@ test-cover: GO111MODULE=on go test -cover . .PHONY: fuzz -fuzz: $(GOFUZZBUILD) $(GOFUZZ) - @echo "==> Fuzz testing" - $(GOFUZZBUILD) - $(GOFUZZ) -workdir=_fuzz +fuzz: + @echo "==> Running Fuzz Tests" + go test -fuzz=FuzzNewVersion -fuzztime=15s . + go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . + go test -fuzz=FuzzNewConstraint -fuzztime=15s . $(GOLANGCI_LINT): # Install golangci-lint. The configuration for it is in the .golangci.yml # file in the root of the repository echo ${GOPATH} curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 - -$(GOFUZZBUILD): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build - -$(GOFUZZ): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index d8f54dcbd3..eab8cac3b7 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -18,18 +18,20 @@ If you are looking for a command line tool for version comparisons please see ## Package Versions +Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. + There are three major versions fo the `semver` package. -* 3.x.x is the new stable and active version. This version is focused on constraint +* 3.x.x is the stable and active version. This version is focused on constraint compatibility for range handling in other tools from other languages. It has a similar API to the v1 releases. The development of this version is on the master branch. The documentation for this version is below. * 2.x was developed primarily for [dep](https://github.com/golang/dep). There are no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -* 1.x.x is the most widely used version with numerous tagged releases. This is the - previous stable and is still maintained for bug fixes. The development, to fix - bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). +* 1.x.x is the original release. It is no longer maintained. You should use the + v3 release instead. You can read the documentation for the 1.x.x release + [here](https://github.com/Masterminds/semver/blob/release-1/README.md). 
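The vendored README hunk above points readers at the `/v3` import path and the 3.x line's focus on constraint handling. As a quick, hedged illustration (assuming the standard `github.com/Masterminds/semver/v3` module path and its documented `NewVersion`/`NewConstraint`/`Check` entry points), a version-against-range check looks roughly like this:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// Parse a concrete version with the v3 API.
	v, err := semver.NewVersion("3.2.1")
	if err != nil {
		panic(err)
	}
	// Parse a constraint range, then check the version against it.
	c, err := semver.NewConstraint(">= 3.0.0, < 4.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Check(v)) // prints: true
}
```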
## Parsing Semantic Versions @@ -242,3 +244,15 @@ for _, m := range msgs { If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) or [create a pull request](https://github.com/Masterminds/semver/pulls). + +## Security + +Security is an important consideration for this project. The project currently +uses the following tools to help discover security issues: + +* [CodeQL](https://github.com/Masterminds/semver) +* [gosec](https://github.com/securego/gosec) +* Daily Fuzz testing + +If you believe you have found a security vulnerability you can privately disclose +it through the [GitHub security page](https://github.com/Masterminds/semver/security). diff --git a/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/vendor/github.com/Masterminds/semver/v3/SECURITY.md new file mode 100644 index 0000000000..a30a66b1f7 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +The following versions of semver are currently supported: + +| Version | Supported | +| ------- | ------------------ | +| 3.x | :white_check_mark: | +| 2.x | :x: | +| 1.x | :x: | + +Fixes are only released for the latest minor version in the form of a patch release. + +## Reporting a Vulnerability + +You can privately disclose a vulnerability through GitHubs +[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories) +mechanism. diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go index 203072e464..8461c7ed90 100644 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -586,7 +586,7 @@ func rewriteRange(i string) string { } o := i for _, v := range m { - t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11]) o = strings.Replace(o, v[0], t, 1) } diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go deleted file mode 100644 index a242ad7058..0000000000 --- a/vendor/github.com/Masterminds/semver/v3/fuzz.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build gofuzz - -package semver - -func Fuzz(data []byte) int { - d := string(data) - - // Test NewVersion - _, _ = NewVersion(d) - - // Test StrictNewVersion - _, _ = StrictNewVersion(d) - - // Test NewConstraint - _, _ = NewConstraint(d) - - // The return value should be 0 normally, 1 if the priority in future tests - // should be increased, and -1 if future tests should skip passing in that - // data. We do not have a reason to change priority so 0 is always returned. - // There are example tests that do this. 
- return 0 -} diff --git a/vendor/github.com/antonmedv/expr/.gitignore b/vendor/github.com/antonmedv/expr/.gitignore index 39b3c48207..b0df3eb444 100644 --- a/vendor/github.com/antonmedv/expr/.gitignore +++ b/vendor/github.com/antonmedv/expr/.gitignore @@ -5,3 +5,4 @@ *.dylib *.test *.out +*.html diff --git a/vendor/github.com/antonmedv/expr/.travis.yml b/vendor/github.com/antonmedv/expr/.travis.yml deleted file mode 100644 index 745b115e20..0000000000 --- a/vendor/github.com/antonmedv/expr/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - 1.13.x diff --git a/vendor/github.com/antonmedv/expr/README.md b/vendor/github.com/antonmedv/expr/README.md index 00a644f048..242431f2ce 100644 --- a/vendor/github.com/antonmedv/expr/README.md +++ b/vendor/github.com/antonmedv/expr/README.md @@ -1,9 +1,9 @@ # Expr -[![Build Status](https://travis-ci.org/antonmedv/expr.svg?branch=master)](https://travis-ci.org/antonmedv/expr) +[![test](https://github.com/antonmedv/expr/actions/workflows/test.yml/badge.svg)](https://github.com/antonmedv/expr/actions/workflows/test.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/antonmedv/expr)](https://goreportcard.com/report/github.com/antonmedv/expr) [![GoDoc](https://godoc.org/github.com/antonmedv/expr?status.svg)](https://godoc.org/github.com/antonmedv/expr) -expr logo +expr logo **Expr** package provides an engine that can compile and evaluate expressions. An expression is a one-liner that returns a value (mostly, but not limited to, booleans). @@ -50,13 +50,13 @@ go get github.com/antonmedv/expr ## Documentation -* See [Getting Started](docs/Getting-Started.md) page for developer documentation. -* See [Language Definition](docs/Language-Definition.md) page to learn the syntax. +* See [Getting Started](https://expr.medv.io/docs/Getting-Started) page for developer documentation. +* See [Language Definition](https://expr.medv.io/docs/Language-Definition) page to learn the syntax. ## Expr Code Editor - - Expr Code Editor + + Expr Code Editor Also, I have an embeddable code editor written in JavaScript which allows editing expressions with syntax highlighting and autocomplete based on your types declaration. @@ -136,28 +136,25 @@ func main() { } ``` -## Contributing - -**Expr** consist of a few packages for parsing source code to AST, type checking AST, compiling to bytecode and VM for running bytecode program. - -Also expr provides powerful tool [exe](cmd/exe) for debugging. It has interactive terminal debugger for our bytecode virtual machine. - -
- debugger
- - -## Who is using Expr? - -* Aviasales [Aviasales](https://aviasales.ru) are actively using Expr for different parts of the search engine. -* Argo [Argo Rollouts](https://argoproj.github.io/argo-rollouts/) - Progressive Delivery for Kubernetes. -* Argo [Argo Workflows](https://argoproj.github.io/argo/) - The workflow engine for KubernetesOverview. -* CrowdSec [Crowdsec](https://crowdsec.net/) - A security automation tool. -* [Mystery Minds](https://www.mysteryminds.com/en/) uses Expr to allow easy yet powerful customization of its matching algorithm. -* [qiniu](https://www.qiniu.com/) qiniu cloud use Expr in trade systems. +## Who uses Expr? + +* [Aviasales](https://aviasales.ru) uses Expr as a business rule engine for our flight search engine. +* [Wish.com](https://www.wish.com) uses Expr for decision-making rule engine in the Wish Assistant. +* [Argo](https://argoproj.github.io) uses Expr in Argo Rollouts and Argo Workflows for Kubernetes. +* [Crowdsec](https://crowdsec.net) uses Expr in a security automation tool. +* [FACEIT](https://www.faceit.com) uses Expr to allow customization of its eSports matchmaking algorithm. +* [qiniu](https://www.qiniu.com) uses Expr in trade systems. +* [Junglee Games](https://www.jungleegames.com/) uses Expr for an in house marketing retention tool [Project Audience](https://www.linkedin.com/pulse/meet-project-audience-our-no-code-swiss-army-knife-product-bharti). +* [OpenTelemetry](https://opentelemetry.io) uses Expr in the OpenTelemetry Collector. +* [Philips Labs](https://github.com/philips-labs/tabia) uses Expr in Tabia, a tool for collecting insights on the characteristics of our code bases. +* [CoreDNS](https://coredns.io) uses Expr in CoreDNS, a DNS server. +* [Chaos Mesh](https://chaos-mesh.org) uses Expr in Chaos Mesh, a cloud-native Chaos Engineering platform. +* [Milvus](https://milvus.io) uses Expr in Milvus, an open-source vector database. +* [Visually.io](https://visually.io) uses Expr as a business rule engine for our personalization targeting algorithm. +* [Akvorado](https://github.com/akvorado/akvorado) uses Expr to classify exporters and interfaces in network flows. [Add your company too](https://github.com/antonmedv/expr/edit/master/README.md) ## License -[MIT](LICENSE) +[MIT](https://github.com/antonmedv/expr/blob/master/LICENSE) diff --git a/vendor/github.com/antonmedv/expr/ast/node.go b/vendor/github.com/antonmedv/expr/ast/node.go index 4b2b5c277b..e85f853e91 100644 --- a/vendor/github.com/antonmedv/expr/ast/node.go +++ b/vendor/github.com/antonmedv/expr/ast/node.go @@ -4,6 +4,7 @@ import ( "reflect" "regexp" + "github.com/antonmedv/expr/builtin" "github.com/antonmedv/expr/file" ) @@ -48,8 +49,11 @@ type NilNode struct { type IdentifierNode struct { base - Value string - NilSafe bool + Value string + Deref bool + FieldIndex []int + Method bool // true if method, false if field + MethodIndex int // index of method, set only if Method is true } type IntegerNode struct { @@ -85,29 +89,29 @@ type UnaryNode struct { type BinaryNode struct { base + Regexp *regexp.Regexp Operator string Left Node Right Node } -type MatchesNode struct { +type ChainNode struct { base - Regexp *regexp.Regexp - Left Node - Right Node + Node Node } -type PropertyNode struct { +type MemberNode struct { base - Node Node - Property string - NilSafe bool -} + Node Node + Property Node + Name string // Name of the filed or method. Used for error reporting. 
+ Optional bool + Deref bool + FieldIndex []int -type IndexNode struct { - base - Node Node - Index Node + // TODO: Replace with a single MethodIndex field of &int type. + Method bool + MethodIndex int } type SliceNode struct { @@ -117,19 +121,13 @@ type SliceNode struct { To Node } -type MethodNode struct { +type CallNode struct { base - Node Node - Method string - Arguments []Node - NilSafe bool -} - -type FunctionNode struct { - base - Name string + Callee Node Arguments []Node + Typed int Fast bool + Func *builtin.Function } type BuiltinNode struct { diff --git a/vendor/github.com/antonmedv/expr/ast/print.go b/vendor/github.com/antonmedv/expr/ast/print.go index 285984bd9a..56bc7dbe2e 100644 --- a/vendor/github.com/antonmedv/expr/ast/print.go +++ b/vendor/github.com/antonmedv/expr/ast/print.go @@ -29,9 +29,9 @@ func dump(v reflect.Value, ident string) string { return out + ident + "}" case reflect.Slice: if v.Len() == 0 { - return "[]" + return t.String() + "{}" } - out := "[\n" + out := t.String() + "{\n" for i := 0; i < v.Len(); i++ { s := v.Index(i) out += fmt.Sprintf("%v%v,", ident+"\t", dump(s, ident+"\t")) @@ -39,7 +39,7 @@ func dump(v reflect.Value, ident string) string { out += "\n" } } - return out + "\n" + ident + "]" + return out + "\n" + ident + "}" case reflect.Ptr: return dump(v.Elem(), ident) case reflect.Interface: diff --git a/vendor/github.com/antonmedv/expr/ast/visitor.go b/vendor/github.com/antonmedv/expr/ast/visitor.go index a3e270e030..351e5d72b2 100644 --- a/vendor/github.com/antonmedv/expr/ast/visitor.go +++ b/vendor/github.com/antonmedv/expr/ast/visitor.go @@ -3,106 +3,66 @@ package ast import "fmt" type Visitor interface { - Enter(node *Node) - Exit(node *Node) + Visit(node *Node) } -type walker struct { - visitor Visitor -} - -func Walk(node *Node, visitor Visitor) { - w := walker{ - visitor: visitor, - } - w.walk(node) -} - -func (w *walker) walk(node *Node) { - w.visitor.Enter(node) - +func Walk(node *Node, v Visitor) { switch n := (*node).(type) { case *NilNode: - w.visitor.Exit(node) case *IdentifierNode: - w.visitor.Exit(node) case *IntegerNode: - w.visitor.Exit(node) case *FloatNode: - w.visitor.Exit(node) case *BoolNode: - w.visitor.Exit(node) case *StringNode: - w.visitor.Exit(node) case *ConstantNode: - w.visitor.Exit(node) case *UnaryNode: - w.walk(&n.Node) - w.visitor.Exit(node) + Walk(&n.Node, v) case *BinaryNode: - w.walk(&n.Left) - w.walk(&n.Right) - w.visitor.Exit(node) - case *MatchesNode: - w.walk(&n.Left) - w.walk(&n.Right) - w.visitor.Exit(node) - case *PropertyNode: - w.walk(&n.Node) - w.visitor.Exit(node) - case *IndexNode: - w.walk(&n.Node) - w.walk(&n.Index) - w.visitor.Exit(node) + Walk(&n.Left, v) + Walk(&n.Right, v) + case *ChainNode: + Walk(&n.Node, v) + case *MemberNode: + Walk(&n.Node, v) + Walk(&n.Property, v) case *SliceNode: + Walk(&n.Node, v) if n.From != nil { - w.walk(&n.From) + Walk(&n.From, v) } if n.To != nil { - w.walk(&n.To) - } - w.visitor.Exit(node) - case *MethodNode: - w.walk(&n.Node) - for i := range n.Arguments { - w.walk(&n.Arguments[i]) + Walk(&n.To, v) } - w.visitor.Exit(node) - case *FunctionNode: + case *CallNode: + Walk(&n.Callee, v) for i := range n.Arguments { - w.walk(&n.Arguments[i]) + Walk(&n.Arguments[i], v) } - w.visitor.Exit(node) case *BuiltinNode: for i := range n.Arguments { - w.walk(&n.Arguments[i]) + Walk(&n.Arguments[i], v) } - w.visitor.Exit(node) case *ClosureNode: - w.walk(&n.Node) - w.visitor.Exit(node) + Walk(&n.Node, v) case *PointerNode: - w.visitor.Exit(node) case *ConditionalNode: - 
w.walk(&n.Cond) - w.walk(&n.Exp1) - w.walk(&n.Exp2) - w.visitor.Exit(node) + Walk(&n.Cond, v) + Walk(&n.Exp1, v) + Walk(&n.Exp2, v) case *ArrayNode: for i := range n.Nodes { - w.walk(&n.Nodes[i]) + Walk(&n.Nodes[i], v) } - w.visitor.Exit(node) case *MapNode: for i := range n.Pairs { - w.walk(&n.Pairs[i]) + Walk(&n.Pairs[i], v) } - w.visitor.Exit(node) case *PairNode: - w.walk(&n.Key) - w.walk(&n.Value) - w.visitor.Exit(node) + Walk(&n.Key, v) + Walk(&n.Value, v) default: panic(fmt.Sprintf("undefined node type (%T)", node)) } + + v.Visit(node) } diff --git a/vendor/github.com/antonmedv/expr/builtin/builtin.go b/vendor/github.com/antonmedv/expr/builtin/builtin.go new file mode 100644 index 0000000000..ad9376962e --- /dev/null +++ b/vendor/github.com/antonmedv/expr/builtin/builtin.go @@ -0,0 +1,101 @@ +package builtin + +import ( + "fmt" + "reflect" +) + +var ( + anyType = reflect.TypeOf(new(interface{})).Elem() + integerType = reflect.TypeOf(0) + floatType = reflect.TypeOf(float64(0)) +) + +type Function struct { + Name string + Func func(args ...interface{}) (interface{}, error) + Opcode int + Types []reflect.Type + Validate func(args []reflect.Type) (reflect.Type, error) +} + +const ( + Len = iota + 1 + Abs + Int + Float +) + +var Builtins = map[int]*Function{ + Len: { + Name: "len", + Opcode: Len, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) != 1 { + return anyType, fmt.Errorf("invalid number of arguments for len (expected 1, got %d)", len(args)) + } + switch kind(args[0]) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String, reflect.Interface: + return integerType, nil + } + return anyType, fmt.Errorf("invalid argument for len (type %s)", args[0]) + }, + }, + Abs: { + Name: "abs", + Opcode: Abs, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) != 1 { + return anyType, fmt.Errorf("invalid number of arguments for abs (expected 1, got %d)", len(args)) + } + switch kind(args[0]) { + case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Interface: + return args[0], nil + } + return anyType, fmt.Errorf("invalid argument for abs (type %s)", args[0]) + }, + }, + Int: { + Name: "int", + Opcode: Int, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) != 1 { + return anyType, fmt.Errorf("invalid number of arguments for int (expected 1, got %d)", len(args)) + } + switch kind(args[0]) { + case reflect.Interface: + return integerType, nil + case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return integerType, nil + case reflect.String: + return integerType, nil + } + return anyType, fmt.Errorf("invalid argument for int (type %s)", args[0]) + }, + }, + Float: { + Name: "float", + Opcode: Float, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) != 1 { + return anyType, fmt.Errorf("invalid number of arguments for float (expected 1, got %d)", len(args)) + } + switch kind(args[0]) { + case reflect.Interface: + return floatType, nil + case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return floatType, nil + case reflect.String: + return floatType, nil + } + return 
anyType, fmt.Errorf("invalid argument for float (type %s)", args[0]) + }, + }, +} + +func kind(t reflect.Type) reflect.Kind { + if t == nil { + return reflect.Invalid + } + return t.Kind() +} diff --git a/vendor/github.com/antonmedv/expr/checker/checker.go b/vendor/github.com/antonmedv/expr/checker/checker.go index 282031a1fb..00025a33ce 100644 --- a/vendor/github.com/antonmedv/expr/checker/checker.go +++ b/vendor/github.com/antonmedv/expr/checker/checker.go @@ -3,182 +3,199 @@ package checker import ( "fmt" "reflect" + "regexp" "github.com/antonmedv/expr/ast" + "github.com/antonmedv/expr/builtin" "github.com/antonmedv/expr/conf" "github.com/antonmedv/expr/file" "github.com/antonmedv/expr/parser" + "github.com/antonmedv/expr/vm" ) -var errorType = reflect.TypeOf((*error)(nil)).Elem() +func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) { + if config == nil { + config = conf.New(nil) + } -func Check(tree *parser.Tree, config *conf.Config) (reflect.Type, error) { v := &visitor{ + config: config, collections: make([]reflect.Type, 0), - } - if config != nil { - v.types = config.Types - v.operators = config.Operators - v.expect = config.Expect - v.strict = config.Strict - v.defaultType = config.DefaultType + parents: make([]ast.Node, 0), } - t := v.visit(tree.Node) + t, _ = v.visit(tree.Node) + + if v.err != nil { + return t, v.err.Bind(tree.Source) + } - if v.expect != reflect.Invalid { - switch v.expect { - case reflect.Int64, reflect.Float64: - if !isNumber(t) { - return nil, fmt.Errorf("expected %v, but got %v", v.expect, t) + if v.config.Expect != reflect.Invalid { + switch v.config.Expect { + case reflect.Int, reflect.Int64, reflect.Float64: + if !isNumber(t) && !isAny(t) { + return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t) } default: - if t.Kind() != v.expect { - return nil, fmt.Errorf("expected %v, but got %v", v.expect, t) + if t != nil { + if t.Kind() == v.config.Expect { + return t, nil + } } + return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t) } } - if v.err != nil { - return t, v.err.Bind(tree.Source) - } - return t, nil } type visitor struct { - types conf.TypesTable - operators conf.OperatorsTable - expect reflect.Kind + config *conf.Config collections []reflect.Type - strict bool - defaultType reflect.Type + parents []ast.Node err *file.Error } -func (v *visitor) visit(node ast.Node) reflect.Type { +type info struct { + method bool + fn *builtin.Function +} + +func (v *visitor) visit(node ast.Node) (reflect.Type, info) { var t reflect.Type + var i info + v.parents = append(v.parents, node) switch n := node.(type) { case *ast.NilNode: - t = v.NilNode(n) + t, i = v.NilNode(n) case *ast.IdentifierNode: - t = v.IdentifierNode(n) + t, i = v.IdentifierNode(n) case *ast.IntegerNode: - t = v.IntegerNode(n) + t, i = v.IntegerNode(n) case *ast.FloatNode: - t = v.FloatNode(n) + t, i = v.FloatNode(n) case *ast.BoolNode: - t = v.BoolNode(n) + t, i = v.BoolNode(n) case *ast.StringNode: - t = v.StringNode(n) + t, i = v.StringNode(n) case *ast.ConstantNode: - t = v.ConstantNode(n) + t, i = v.ConstantNode(n) case *ast.UnaryNode: - t = v.UnaryNode(n) + t, i = v.UnaryNode(n) case *ast.BinaryNode: - t = v.BinaryNode(n) - case *ast.MatchesNode: - t = v.MatchesNode(n) - case *ast.PropertyNode: - t = v.PropertyNode(n) - case *ast.IndexNode: - t = v.IndexNode(n) + t, i = v.BinaryNode(n) + case *ast.ChainNode: + t, i = v.ChainNode(n) + case *ast.MemberNode: + t, i = v.MemberNode(n) case *ast.SliceNode: - t = v.SliceNode(n) - case 
*ast.MethodNode: - t = v.MethodNode(n) - case *ast.FunctionNode: - t = v.FunctionNode(n) + t, i = v.SliceNode(n) + case *ast.CallNode: + t, i = v.CallNode(n) case *ast.BuiltinNode: - t = v.BuiltinNode(n) + t, i = v.BuiltinNode(n) case *ast.ClosureNode: - t = v.ClosureNode(n) + t, i = v.ClosureNode(n) case *ast.PointerNode: - t = v.PointerNode(n) + t, i = v.PointerNode(n) case *ast.ConditionalNode: - t = v.ConditionalNode(n) + t, i = v.ConditionalNode(n) case *ast.ArrayNode: - t = v.ArrayNode(n) + t, i = v.ArrayNode(n) case *ast.MapNode: - t = v.MapNode(n) + t, i = v.MapNode(n) case *ast.PairNode: - t = v.PairNode(n) + t, i = v.PairNode(n) default: panic(fmt.Sprintf("undefined node type (%T)", node)) } + v.parents = v.parents[:len(v.parents)-1] node.SetType(t) - return t + return t, i } -func (v *visitor) error(node ast.Node, format string, args ...interface{}) reflect.Type { +func (v *visitor) error(node ast.Node, format string, args ...interface{}) (reflect.Type, info) { if v.err == nil { // show first error v.err = &file.Error{ Location: node.Location(), Message: fmt.Sprintf(format, args...), } } - return interfaceType // interface represent undefined type + return anyType, info{} // interface represent undefined type } -func (v *visitor) NilNode(*ast.NilNode) reflect.Type { - return nilType +func (v *visitor) NilNode(*ast.NilNode) (reflect.Type, info) { + return nilType, info{} } -func (v *visitor) IdentifierNode(node *ast.IdentifierNode) reflect.Type { - if v.types == nil { - return interfaceType +func (v *visitor) IdentifierNode(node *ast.IdentifierNode) (reflect.Type, info) { + if fn, ok := v.config.Functions[node.Value]; ok { + // Return anyType instead of func type as we don't know the arguments yet. + // The func type can be one of the fn.Types. The type will be resolved + // when the arguments are known in CallNode. 
+ return anyType, info{fn: fn} } - if t, ok := v.types[node.Value]; ok { + if v.config.Types == nil { + node.Deref = true + } else if t, ok := v.config.Types[node.Value]; ok { if t.Ambiguous { return v.error(node, "ambiguous identifier %v", node.Value) } - return t.Type - } - if !v.strict { - if v.defaultType != nil { - return v.defaultType - } - return interfaceType + d, c := deref(t.Type) + node.Deref = c + node.Method = t.Method + node.MethodIndex = t.MethodIndex + node.FieldIndex = t.FieldIndex + return d, info{method: t.Method} } - if !node.NilSafe { + if v.config.Strict { return v.error(node, "unknown name %v", node.Value) } - return nilType + if v.config.DefaultType != nil { + return v.config.DefaultType, info{} + } + return anyType, info{} } -func (v *visitor) IntegerNode(*ast.IntegerNode) reflect.Type { - return integerType +func (v *visitor) IntegerNode(*ast.IntegerNode) (reflect.Type, info) { + return integerType, info{} } -func (v *visitor) FloatNode(*ast.FloatNode) reflect.Type { - return floatType +func (v *visitor) FloatNode(*ast.FloatNode) (reflect.Type, info) { + return floatType, info{} } -func (v *visitor) BoolNode(*ast.BoolNode) reflect.Type { - return boolType +func (v *visitor) BoolNode(*ast.BoolNode) (reflect.Type, info) { + return boolType, info{} } -func (v *visitor) StringNode(*ast.StringNode) reflect.Type { - return stringType +func (v *visitor) StringNode(*ast.StringNode) (reflect.Type, info) { + return stringType, info{} } -func (v *visitor) ConstantNode(node *ast.ConstantNode) reflect.Type { - return reflect.TypeOf(node.Value) +func (v *visitor) ConstantNode(node *ast.ConstantNode) (reflect.Type, info) { + return reflect.TypeOf(node.Value), info{} } -func (v *visitor) UnaryNode(node *ast.UnaryNode) reflect.Type { - t := v.visit(node.Node) +func (v *visitor) UnaryNode(node *ast.UnaryNode) (reflect.Type, info) { + t, _ := v.visit(node.Node) switch node.Operator { case "!", "not": if isBool(t) { - return boolType + return boolType, info{} + } + if isAny(t) { + return boolType, info{} } case "+", "-": if isNumber(t) { - return t + return t, info{} + } + if isAny(t) { + return anyType, info{} } default: @@ -188,83 +205,170 @@ func (v *visitor) UnaryNode(node *ast.UnaryNode) reflect.Type { return v.error(node, `invalid operation: %v (mismatched type %v)`, node.Operator, t) } -func (v *visitor) BinaryNode(node *ast.BinaryNode) reflect.Type { - l := v.visit(node.Left) - r := v.visit(node.Right) +func (v *visitor) BinaryNode(node *ast.BinaryNode) (reflect.Type, info) { + l, _ := v.visit(node.Left) + r, _ := v.visit(node.Right) // check operator overloading - if fns, ok := v.operators[node.Operator]; ok { - t, _, ok := conf.FindSuitableOperatorOverload(fns, v.types, l, r) + if fns, ok := v.config.Operators[node.Operator]; ok { + t, _, ok := conf.FindSuitableOperatorOverload(fns, v.config.Types, l, r) if ok { - return t + return t, info{} } } switch node.Operator { case "==", "!=": if isNumber(l) && isNumber(r) { - return boolType + return boolType, info{} + } + if l == nil || r == nil { // It is possible to compare with nil. 
+ return boolType, info{} + } + if l.Kind() == r.Kind() { + return boolType, info{} } - if isComparable(l, r) { - return boolType + if isAny(l) || isAny(r) { + return boolType, info{} } case "or", "||", "and", "&&": if isBool(l) && isBool(r) { - return boolType + return boolType, info{} + } + if or(l, r, isBool) { + return boolType, info{} } - case "in", "not in": - if isString(l) && isStruct(r) { - return boolType + case "<", ">", ">=", "<=": + if isNumber(l) && isNumber(r) { + return boolType, info{} } - if isMap(r) { - return boolType + if isString(l) && isString(r) { + return boolType, info{} } - if isArray(r) { - return boolType + if isTime(l) && isTime(r) { + return boolType, info{} + } + if or(l, r, isNumber, isString, isTime) { + return boolType, info{} } - case "<", ">", ">=", "<=": + case "-": if isNumber(l) && isNumber(r) { - return boolType + return combined(l, r), info{} } - if isString(l) && isString(r) { - return boolType + if isTime(l) && isTime(r) { + return durationType, info{} + } + if or(l, r, isNumber, isTime) { + return anyType, info{} } - case "/", "-", "*": + case "/", "*": if isNumber(l) && isNumber(r) { - return combined(l, r) + return combined(l, r), info{} + } + if or(l, r, isNumber) { + return anyType, info{} } - case "**": + case "**", "^": if isNumber(l) && isNumber(r) { - return floatType + return floatType, info{} + } + if or(l, r, isNumber) { + return floatType, info{} } case "%": if isInteger(l) && isInteger(r) { - return combined(l, r) + return combined(l, r), info{} + } + if or(l, r, isInteger) { + return anyType, info{} } case "+": if isNumber(l) && isNumber(r) { - return combined(l, r) + return combined(l, r), info{} + } + if isString(l) && isString(r) { + return stringType, info{} + } + if isTime(l) && isDuration(r) { + return timeType, info{} + } + if isDuration(l) && isTime(r) { + return timeType, info{} + } + if or(l, r, isNumber, isString, isTime, isDuration) { + return anyType, info{} + } + + case "in": + if (isString(l) || isAny(l)) && isStruct(r) { + return boolType, info{} + } + if isMap(r) { + return boolType, info{} + } + if isArray(r) { + return boolType, info{} + } + if isAny(l) && anyOf(r, isString, isArray, isMap) { + return boolType, info{} + } + if isAny(r) { + return boolType, info{} + } + + case "matches": + if s, ok := node.Right.(*ast.StringNode); ok { + r, err := regexp.Compile(s.Value) + if err != nil { + return v.error(node, err.Error()) + } + node.Regexp = r } if isString(l) && isString(r) { - return stringType + return boolType, info{} + } + if or(l, r, isString) { + return boolType, info{} } case "contains", "startsWith", "endsWith": if isString(l) && isString(r) { - return boolType + return boolType, info{} + } + if or(l, r, isString) { + return boolType, info{} } case "..": + ret := reflect.SliceOf(integerType) if isInteger(l) && isInteger(r) { - return reflect.SliceOf(integerType) + return ret, info{} + } + if or(l, r, isInteger) { + return ret, info{} + } + + case "??": + if l == nil && r != nil { + return r, info{} + } + if l != nil && r == nil { + return l, info{} + } + if l == nil && r == nil { + return nilType, info{} } + if r.AssignableTo(l) { + return l, info{} + } + return anyType, info{} default: return v.error(node, "unknown operator (%v)", node.Operator) @@ -274,169 +378,268 @@ func (v *visitor) BinaryNode(node *ast.BinaryNode) reflect.Type { return v.error(node, `invalid operation: %v (mismatched types %v and %v)`, node.Operator, l, r) } -func (v *visitor) MatchesNode(node *ast.MatchesNode) reflect.Type { - l 
:= v.visit(node.Left) - r := v.visit(node.Right) - - if isString(l) && isString(r) { - return boolType - } - - return v.error(node, `invalid operation: matches (mismatched types %v and %v)`, l, r) +func (v *visitor) ChainNode(node *ast.ChainNode) (reflect.Type, info) { + return v.visit(node.Node) } -func (v *visitor) PropertyNode(node *ast.PropertyNode) reflect.Type { - t := v.visit(node.Node) - if t, ok := fieldType(t, node.Property); ok { - return t +func (v *visitor) MemberNode(node *ast.MemberNode) (reflect.Type, info) { + base, _ := v.visit(node.Node) + prop, _ := v.visit(node.Property) + + if name, ok := node.Property.(*ast.StringNode); ok { + if base == nil { + return v.error(node, "type %v has no field %v", base, name.Value) + } + // First, check methods defined on base type itself, + // independent of which type it is. Without dereferencing. + if m, ok := base.MethodByName(name.Value); ok { + if base.Kind() == reflect.Interface { + // In case of interface type method will not have a receiver, + // and to prevent checker decreasing numbers of in arguments + // return method type as not method (second argument is false). + + // Also, we can not use m.Index here, because it will be + // different indexes for different types which implement + // the same interface. + return m.Type, info{} + } else { + node.Method = true + node.MethodIndex = m.Index + node.Name = name.Value + return m.Type, info{method: true} + } + } } - if !node.NilSafe { - return v.error(node, "type %v has no field %v", t, node.Property) + + if base.Kind() == reflect.Ptr { + base = base.Elem() } - return nil -} -func (v *visitor) IndexNode(node *ast.IndexNode) reflect.Type { - t := v.visit(node.Node) - i := v.visit(node.Index) + switch base.Kind() { + case reflect.Interface: + node.Deref = true + return anyType, info{} - if t, ok := indexType(t); ok { - if !isInteger(i) && !isString(i) { - return v.error(node, "invalid operation: cannot use %v as index to %v", i, t) + case reflect.Map: + if prop != nil && !prop.AssignableTo(base.Key()) && !isAny(prop) { + return v.error(node.Property, "cannot use %v to get an element from %v", prop, base) + } + t, c := deref(base.Elem()) + node.Deref = c + return t, info{} + + case reflect.Array, reflect.Slice: + if !isInteger(prop) && !isAny(prop) { + return v.error(node.Property, "array elements can only be selected using an integer (got %v)", prop) + } + t, c := deref(base.Elem()) + node.Deref = c + return t, info{} + + case reflect.Struct: + if name, ok := node.Property.(*ast.StringNode); ok { + propertyName := name.Value + if field, ok := fetchField(base, propertyName); ok { + t, c := deref(field.Type) + node.Deref = c + node.FieldIndex = field.Index + node.Name = propertyName + return t, info{} + } + if len(v.parents) > 1 { + if _, ok := v.parents[len(v.parents)-2].(*ast.CallNode); ok { + return v.error(node, "type %v has no method %v", base, propertyName) + } + } + return v.error(node, "type %v has no field %v", base, propertyName) } - return t } - return v.error(node, "invalid operation: type %v does not support indexing", t) + return v.error(node, "type %v[%v] is undefined", base, prop) } -func (v *visitor) SliceNode(node *ast.SliceNode) reflect.Type { - t := v.visit(node.Node) +func (v *visitor) SliceNode(node *ast.SliceNode) (reflect.Type, info) { + t, _ := v.visit(node.Node) - _, isIndex := indexType(t) + switch t.Kind() { + case reflect.Interface: + // ok + case reflect.String, reflect.Array, reflect.Slice: + // ok + default: + return v.error(node, "cannot slice %v", t) 
+ } - if isIndex || isString(t) { - if node.From != nil { - from := v.visit(node.From) - if !isInteger(from) { - return v.error(node.From, "invalid operation: non-integer slice index %v", from) - } + if node.From != nil { + from, _ := v.visit(node.From) + if !isInteger(from) && !isAny(from) { + return v.error(node.From, "non-integer slice index %v", from) } - if node.To != nil { - to := v.visit(node.To) - if !isInteger(to) { - return v.error(node.To, "invalid operation: non-integer slice index %v", to) - } + } + if node.To != nil { + to, _ := v.visit(node.To) + if !isInteger(to) && !isAny(to) { + return v.error(node.To, "non-integer slice index %v", to) } - return t } - - return v.error(node, "invalid operation: cannot slice %v", t) + return t, info{} } -func (v *visitor) FunctionNode(node *ast.FunctionNode) reflect.Type { - if f, ok := v.types[node.Name]; ok { - if fn, ok := isFuncType(f.Type); ok { +func (v *visitor) CallNode(node *ast.CallNode) (reflect.Type, info) { + fn, fnInfo := v.visit(node.Callee) - inputParamsCount := 1 // for functions - if f.Method { - inputParamsCount = 2 // for methods + if fnInfo.fn != nil { + f := fnInfo.fn + node.Func = f + if f.Validate != nil { + args := make([]reflect.Type, len(node.Arguments)) + for i, arg := range node.Arguments { + args[i], _ = v.visit(arg) } - - if !isInterface(fn) && - fn.IsVariadic() && - fn.NumIn() == inputParamsCount && - ((fn.NumOut() == 1 && // Function with one return value - fn.Out(0).Kind() == reflect.Interface) || - (fn.NumOut() == 2 && // Function with one return value and an error - fn.Out(0).Kind() == reflect.Interface && - fn.Out(1) == errorType)) { - rest := fn.In(fn.NumIn() - 1) // function has only one param for functions and two for methods - if rest.Kind() == reflect.Slice && rest.Elem().Kind() == reflect.Interface { - node.Fast = true + t, err := f.Validate(args) + if err != nil { + return v.error(node, "%v", err) + } + return t, info{} + } + if len(f.Types) == 0 { + t, err := v.checkFunc(f.Name, functionType, false, node) + if err != nil { + if v.err == nil { + v.err = err } + return anyType, info{} } - - return v.checkFunc(fn, f.Method, node, node.Name, node.Arguments) + // No type was specified, so we assume the function returns any. 
+ return t, info{} + } + var lastErr *file.Error + for _, t := range f.Types { + outType, err := v.checkFunc(f.Name, t, false, node) + if err != nil { + lastErr = err + continue + } + return outType, info{} } - } - if !v.strict { - if v.defaultType != nil { - return v.defaultType + if lastErr != nil { + if v.err == nil { + v.err = lastErr + } + return anyType, info{} } - return interfaceType } - return v.error(node, "unknown func %v", node.Name) -} -func (v *visitor) MethodNode(node *ast.MethodNode) reflect.Type { - t := v.visit(node.Node) - if f, method, ok := methodType(t, node.Method); ok { - if fn, ok := isFuncType(f); ok { - return v.checkFunc(fn, method, node, node.Method, node.Arguments) + fnName := "function" + if identifier, ok := node.Callee.(*ast.IdentifierNode); ok { + fnName = identifier.Value + } + if member, ok := node.Callee.(*ast.MemberNode); ok { + if name, ok := member.Property.(*ast.StringNode); ok { + fnName = name.Value } } - if !node.NilSafe { - return v.error(node, "type %v has no method %v", t, node.Method) + switch fn.Kind() { + case reflect.Interface: + return anyType, info{} + case reflect.Func: + inputParamsCount := 1 // for functions + if fnInfo.method { + inputParamsCount = 2 // for methods + } + // TODO: Deprecate OpCallFast and move fn(...any) any to TypedFunc list. + // To do this we need add support for variadic arguments in OpCallTyped. + if !isAny(fn) && + fn.IsVariadic() && + fn.NumIn() == inputParamsCount && + fn.NumOut() == 1 && + fn.Out(0).Kind() == reflect.Interface { + rest := fn.In(fn.NumIn() - 1) // function has only one param for functions and two for methods + if rest.Kind() == reflect.Slice && rest.Elem().Kind() == reflect.Interface { + node.Fast = true + } + } + + outType, err := v.checkFunc(fnName, fn, fnInfo.method, node) + if err != nil { + if v.err == nil { + v.err = err + } + return anyType, info{} + } + + v.findTypedFunc(node, fn, fnInfo.method) + + return outType, info{} } - return nil + return v.error(node, "%v is not callable", fn) } -// checkFunc checks func arguments and returns "return type" of func or method. -func (v *visitor) checkFunc(fn reflect.Type, method bool, node ast.Node, name string, arguments []ast.Node) reflect.Type { - if isInterface(fn) { - return interfaceType +func (v *visitor) checkFunc(name string, fn reflect.Type, method bool, node *ast.CallNode) (reflect.Type, *file.Error) { + if isAny(fn) { + return anyType, nil } if fn.NumOut() == 0 { - return v.error(node, "func %v doesn't return value", name) + return anyType, &file.Error{ + Location: node.Location(), + Message: fmt.Sprintf("func %v doesn't return value", name), + } } if numOut := fn.NumOut(); numOut > 2 { - return v.error(node, "func %v returns more then two values", name) + return anyType, &file.Error{ + Location: node.Location(), + Message: fmt.Sprintf("func %v returns more then two values", name), + } } - numIn := fn.NumIn() - // If func is method on an env, first argument should be a receiver, - // and actual arguments less then numIn by one. + // and actual arguments less than fnNumIn by one. + fnNumIn := fn.NumIn() + if method { + fnNumIn-- + } + // Skip first argument in case of the receiver. 
+ fnInOffset := 0 if method { - numIn-- + fnInOffset = 1 } if fn.IsVariadic() { - if len(arguments) < numIn-1 { - return v.error(node, "not enough arguments to call %v", name) + if len(node.Arguments) < fnNumIn-1 { + return anyType, &file.Error{ + Location: node.Location(), + Message: fmt.Sprintf("not enough arguments to call %v", name), + } } } else { - if len(arguments) > numIn { - return v.error(node, "too many arguments to call %v", name) + if len(node.Arguments) > fnNumIn { + return anyType, &file.Error{ + Location: node.Location(), + Message: fmt.Sprintf("too many arguments to call %v", name), + } } - if len(arguments) < numIn { - return v.error(node, "not enough arguments to call %v", name) + if len(node.Arguments) < fnNumIn { + return anyType, &file.Error{ + Location: node.Location(), + Message: fmt.Sprintf("not enough arguments to call %v", name), + } } } - offset := 0 - - // Skip first argument in case of the receiver. - if method { - offset = 1 - } - - for i, arg := range arguments { - t := v.visit(arg) + for i, arg := range node.Arguments { + t, _ := v.visit(arg) var in reflect.Type - if fn.IsVariadic() && i >= numIn-1 { + if fn.IsVariadic() && i >= fnNumIn-1 { // For variadic arguments fn(xs ...int), go replaces type of xs (int) with ([]int). // As we compare arguments one by one, we need underling type. - in = fn.In(fn.NumIn() - 1) - in, _ = indexType(in) + in = fn.In(fn.NumIn() - 1).Elem() } else { - in = fn.In(i + offset) + in = fn.In(i + fnInOffset) } - if isIntegerOrArithmeticOperation(arg) { + if isIntegerOrArithmeticOperation(arg) && (isInteger(in) || isFloat(in)) { t = in setTypeForIntegers(arg, t) } @@ -446,104 +649,99 @@ func (v *visitor) checkFunc(fn reflect.Type, method bool, node ast.Node, name st } if !t.AssignableTo(in) && t.Kind() != reflect.Interface { - return v.error(arg, "cannot use %v as argument (type %v) to call %v ", t, in, name) + return anyType, &file.Error{ + Location: arg.Location(), + Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name), + } } } - return fn.Out(0) + return fn.Out(0), nil } -func (v *visitor) BuiltinNode(node *ast.BuiltinNode) reflect.Type { +func (v *visitor) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { switch node.Name { - - case "len": - param := v.visit(node.Arguments[0]) - if isArray(param) || isMap(param) || isString(param) { - return integerType - } - return v.error(node, "invalid argument for len (type %v)", param) - case "all", "none", "any", "one": - collection := v.visit(node.Arguments[0]) - if !isArray(collection) { + collection, _ := v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } v.collections = append(v.collections, collection) - closure := v.visit(node.Arguments[1]) + closure, _ := v.visit(node.Arguments[1]) v.collections = v.collections[:len(v.collections)-1] if isFunc(closure) && closure.NumOut() == 1 && - closure.NumIn() == 1 && isInterface(closure.In(0)) { + closure.NumIn() == 1 && isAny(closure.In(0)) { - if !isBool(closure.Out(0)) { + if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) { return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String()) } - return boolType + return boolType, info{} } return v.error(node.Arguments[1], "closure should has one input and one output param") case "filter": - collection := v.visit(node.Arguments[0]) - if !isArray(collection) { + collection, _ := 
v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } v.collections = append(v.collections, collection) - closure := v.visit(node.Arguments[1]) + closure, _ := v.visit(node.Arguments[1]) v.collections = v.collections[:len(v.collections)-1] if isFunc(closure) && closure.NumOut() == 1 && - closure.NumIn() == 1 && isInterface(closure.In(0)) { + closure.NumIn() == 1 && isAny(closure.In(0)) { - if !isBool(closure.Out(0)) { + if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) { return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String()) } - if isInterface(collection) { - return arrayType + if isAny(collection) { + return arrayType, info{} } - return reflect.SliceOf(collection.Elem()) + return reflect.SliceOf(collection.Elem()), info{} } return v.error(node.Arguments[1], "closure should has one input and one output param") case "map": - collection := v.visit(node.Arguments[0]) - if !isArray(collection) { + collection, _ := v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } v.collections = append(v.collections, collection) - closure := v.visit(node.Arguments[1]) + closure, _ := v.visit(node.Arguments[1]) v.collections = v.collections[:len(v.collections)-1] if isFunc(closure) && closure.NumOut() == 1 && - closure.NumIn() == 1 && isInterface(closure.In(0)) { + closure.NumIn() == 1 && isAny(closure.In(0)) { - return reflect.SliceOf(closure.Out(0)) + return reflect.SliceOf(closure.Out(0)), info{} } return v.error(node.Arguments[1], "closure should has one input and one output param") case "count": - collection := v.visit(node.Arguments[0]) - if !isArray(collection) { + collection, _ := v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } v.collections = append(v.collections, collection) - closure := v.visit(node.Arguments[1]) + closure, _ := v.visit(node.Arguments[1]) v.collections = v.collections[:len(v.collections)-1] if isFunc(closure) && closure.NumOut() == 1 && - closure.NumIn() == 1 && isInterface(closure.In(0)) { - if !isBool(closure.Out(0)) { + closure.NumIn() == 1 && isAny(closure.In(0)) { + if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) { return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String()) } - return integerType + return integerType, info{} } return v.error(node.Arguments[1], "closure should has one input and one output param") @@ -552,64 +750,107 @@ func (v *visitor) BuiltinNode(node *ast.BuiltinNode) reflect.Type { } } -func (v *visitor) ClosureNode(node *ast.ClosureNode) reflect.Type { - t := v.visit(node.Node) - return reflect.FuncOf([]reflect.Type{interfaceType}, []reflect.Type{t}, false) +func (v *visitor) ClosureNode(node *ast.ClosureNode) (reflect.Type, info) { + t, _ := v.visit(node.Node) + return reflect.FuncOf([]reflect.Type{anyType}, []reflect.Type{t}, false), info{} } -func (v *visitor) PointerNode(node *ast.PointerNode) reflect.Type { +func (v *visitor) PointerNode(node *ast.PointerNode) (reflect.Type, info) { if len(v.collections) == 0 { return v.error(node, "cannot use pointer accessor outside closure") } collection := v.collections[len(v.collections)-1] - - if t, ok := indexType(collection); ok { - return t + switch 
collection.Kind() { + case reflect.Interface: + return anyType, info{} + case reflect.Array, reflect.Slice: + return collection.Elem(), info{} } return v.error(node, "cannot use %v as array", collection) } -func (v *visitor) ConditionalNode(node *ast.ConditionalNode) reflect.Type { - c := v.visit(node.Cond) - if !isBool(c) { +func (v *visitor) ConditionalNode(node *ast.ConditionalNode) (reflect.Type, info) { + c, _ := v.visit(node.Cond) + if !isBool(c) && !isAny(c) { return v.error(node.Cond, "non-bool expression (type %v) used as condition", c) } - t1 := v.visit(node.Exp1) - t2 := v.visit(node.Exp2) + t1, _ := v.visit(node.Exp1) + t2, _ := v.visit(node.Exp2) if t1 == nil && t2 != nil { - return t2 + return t2, info{} } if t1 != nil && t2 == nil { - return t1 + return t1, info{} } if t1 == nil && t2 == nil { - return nilType + return nilType, info{} } if t1.AssignableTo(t2) { - return t1 + return t1, info{} } - return interfaceType + return anyType, info{} } -func (v *visitor) ArrayNode(node *ast.ArrayNode) reflect.Type { +func (v *visitor) ArrayNode(node *ast.ArrayNode) (reflect.Type, info) { for _, node := range node.Nodes { - _ = v.visit(node) + v.visit(node) } - return arrayType + return arrayType, info{} } -func (v *visitor) MapNode(node *ast.MapNode) reflect.Type { +func (v *visitor) MapNode(node *ast.MapNode) (reflect.Type, info) { for _, pair := range node.Pairs { v.visit(pair) } - return mapType + return mapType, info{} } -func (v *visitor) PairNode(node *ast.PairNode) reflect.Type { +func (v *visitor) PairNode(node *ast.PairNode) (reflect.Type, info) { v.visit(node.Key) v.visit(node.Value) - return nilType + return nilType, info{} +} + +func (v *visitor) findTypedFunc(node *ast.CallNode, fn reflect.Type, method bool) { + // OnCallTyped doesn't work for functions with variadic arguments, + // and doesn't work named function, like `type MyFunc func() int`. + // In PkgPath() is an empty string, it's unnamed function. 
+ if !fn.IsVariadic() && fn.PkgPath() == "" { + fnNumIn := fn.NumIn() + fnInOffset := 0 + if method { + fnNumIn-- + fnInOffset = 1 + } + funcTypes: + for i := range vm.FuncTypes { + if i == 0 { + continue + } + typed := reflect.ValueOf(vm.FuncTypes[i]).Elem().Type() + if typed.Kind() != reflect.Func { + continue + } + if typed.NumOut() != fn.NumOut() { + continue + } + for j := 0; j < typed.NumOut(); j++ { + if typed.Out(j) != fn.Out(j) { + continue funcTypes + } + } + if typed.NumIn() != fnNumIn { + continue + } + for j := 0; j < typed.NumIn(); j++ { + if typed.In(j) != fn.In(j+fnInOffset) { + continue funcTypes + } + } + node.Typed = i + } + } } diff --git a/vendor/github.com/antonmedv/expr/checker/types.go b/vendor/github.com/antonmedv/expr/checker/types.go index 756ed8f5db..7ccd894809 100644 --- a/vendor/github.com/antonmedv/expr/checker/types.go +++ b/vendor/github.com/antonmedv/expr/checker/types.go @@ -2,88 +2,60 @@ package checker import ( "reflect" + "time" "github.com/antonmedv/expr/ast" + "github.com/antonmedv/expr/conf" ) var ( - nilType = reflect.TypeOf(nil) - boolType = reflect.TypeOf(true) - integerType = reflect.TypeOf(int(0)) - floatType = reflect.TypeOf(float64(0)) - stringType = reflect.TypeOf("") - arrayType = reflect.TypeOf([]interface{}{}) - mapType = reflect.TypeOf(map[string]interface{}{}) - interfaceType = reflect.TypeOf(new(interface{})).Elem() + nilType = reflect.TypeOf(nil) + boolType = reflect.TypeOf(true) + integerType = reflect.TypeOf(0) + floatType = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + arrayType = reflect.TypeOf([]interface{}{}) + mapType = reflect.TypeOf(map[string]interface{}{}) + anyType = reflect.TypeOf(new(interface{})).Elem() + timeType = reflect.TypeOf(time.Time{}) + durationType = reflect.TypeOf(time.Duration(0)) + functionType = reflect.TypeOf(new(func(...interface{}) (interface{}, error))).Elem() + errorType = reflect.TypeOf((*error)(nil)).Elem() ) -func typeWeight(t reflect.Type) int { - switch t.Kind() { - case reflect.Uint: - return 1 - case reflect.Uint8: - return 2 - case reflect.Uint16: - return 3 - case reflect.Uint32: - return 4 - case reflect.Uint64: - return 5 - case reflect.Int: - return 6 - case reflect.Int8: - return 7 - case reflect.Int16: - return 8 - case reflect.Int32: - return 9 - case reflect.Int64: - return 10 - case reflect.Float32: - return 11 - case reflect.Float64: - return 12 - default: - return 0 - } -} - func combined(a, b reflect.Type) reflect.Type { - if typeWeight(a) > typeWeight(b) { + if a.Kind() == b.Kind() { return a - } else { - return b } + if isFloat(a) || isFloat(b) { + return floatType + } + return integerType } -func dereference(t reflect.Type) reflect.Type { - if t == nil { - return nil - } - if t.Kind() == reflect.Ptr { - t = dereference(t.Elem()) +func anyOf(t reflect.Type, fns ...func(reflect.Type) bool) bool { + for _, fn := range fns { + if fn(t) { + return true + } } - return t + return false } -func isComparable(l, r reflect.Type) bool { - l = dereference(l) - r = dereference(r) - - if l == nil || r == nil { // It is possible to compare with nil. +func or(l, r reflect.Type, fns ...func(reflect.Type) bool) bool { + if isAny(l) && isAny(r) { return true } - if l.Kind() == r.Kind() { + if isAny(l) && anyOf(r, fns...) { return true } - if isInterface(l) || isInterface(r) { + if isAny(r) && anyOf(l, fns...) 
{ return true } return false } -func isInterface(t reflect.Type) bool { - t = dereference(t) +func isAny(t reflect.Type) bool { if t != nil { switch t.Kind() { case reflect.Interface: @@ -94,28 +66,22 @@ func isInterface(t reflect.Type) bool { } func isInteger(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: fallthrough case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return true - case reflect.Interface: - return true } } return false } func isFloat(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { case reflect.Float32, reflect.Float64: return true - case reflect.Interface: - return true } } return false @@ -125,62 +91,75 @@ func isNumber(t reflect.Type) bool { return isInteger(t) || isFloat(t) } +func isTime(t reflect.Type) bool { + if t != nil { + switch t { + case timeType: + return true + } + } + return isAny(t) +} + +func isDuration(t reflect.Type) bool { + if t != nil { + switch t { + case durationType: + return true + } + } + return false +} + func isBool(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { case reflect.Bool: return true - case reflect.Interface: - return true } } return false } func isString(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { case reflect.String: return true - case reflect.Interface: - return true } } return false } func isArray(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { + case reflect.Ptr: + return isArray(t.Elem()) case reflect.Slice, reflect.Array: return true - case reflect.Interface: - return true } } return false } func isMap(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { + case reflect.Ptr: + return isMap(t.Elem()) case reflect.Map: return true - case reflect.Interface: - return true } } return false } func isStruct(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { + case reflect.Ptr: + return isStruct(t.Elem()) case reflect.Struct: return true } @@ -189,9 +168,10 @@ func isStruct(t reflect.Type) bool { } func isFunc(t reflect.Type) bool { - t = dereference(t) if t != nil { switch t.Kind() { + case reflect.Ptr: + return isFunc(t.Elem()) case reflect.Func: return true } @@ -199,117 +179,50 @@ func isFunc(t reflect.Type) bool { return false } -func fieldType(ntype reflect.Type, name string) (reflect.Type, bool) { - ntype = dereference(ntype) - if ntype != nil { - switch ntype.Kind() { - case reflect.Interface: - return interfaceType, true - case reflect.Struct: - // First check all struct's fields. - for i := 0; i < ntype.NumField(); i++ { - f := ntype.Field(i) - if f.Name == name { - return f.Type, true - } - } - - // Second check fields of embedded structs. - for i := 0; i < ntype.NumField(); i++ { - f := ntype.Field(i) - if f.Anonymous { - if t, ok := fieldType(f.Type, name); ok { - return t, true - } - } - } - case reflect.Map: - return ntype.Elem(), true - } - } - - return nil, false -} - -func methodType(t reflect.Type, name string) (reflect.Type, bool, bool) { +func fetchField(t reflect.Type, name string) (reflect.StructField, bool) { if t != nil { - // First, check methods defined on type itself, - // independent of which type it is. 
- if m, ok := t.MethodByName(name); ok { - if t.Kind() == reflect.Interface { - // In case of interface type method will not have a receiver, - // and to prevent checker decreasing numbers of in arguments - // return method type as not method (second argument is false). - return m.Type, false, true - } else { - return m.Type, true, true + // First check all structs fields. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // Search all fields, even embedded structs. + if conf.FieldName(field) == name { + return field, true } } - d := t - if t.Kind() == reflect.Ptr { - d = t.Elem() - } - - switch d.Kind() { - case reflect.Interface: - return interfaceType, false, true - case reflect.Struct: - // First, check all struct's fields. - for i := 0; i < d.NumField(); i++ { - f := d.Field(i) - if !f.Anonymous && f.Name == name { - return f.Type, false, true - } - } - - // Second, check fields of embedded structs. - for i := 0; i < d.NumField(); i++ { - f := d.Field(i) - if f.Anonymous { - if t, method, ok := methodType(f.Type, name); ok { - return t, method, true - } + // Second check fields of embedded structs. + for i := 0; i < t.NumField(); i++ { + anon := t.Field(i) + if anon.Anonymous { + if field, ok := fetchField(anon.Type, name); ok { + field.Index = append(anon.Index, field.Index...) + return field, true } } - - case reflect.Map: - return d.Elem(), false, true } } - return nil, false, false + return reflect.StructField{}, false } -func indexType(ntype reflect.Type) (reflect.Type, bool) { - ntype = dereference(ntype) - if ntype == nil { +func deref(t reflect.Type) (reflect.Type, bool) { + if t == nil { return nil, false } - - switch ntype.Kind() { - case reflect.Interface: - return interfaceType, true - case reflect.Map, reflect.Array, reflect.Slice: - return ntype.Elem(), true - } - - return nil, false -} - -func isFuncType(ntype reflect.Type) (reflect.Type, bool) { - ntype = dereference(ntype) - if ntype == nil { - return nil, false + if t.Kind() == reflect.Interface { + return t, true } - - switch ntype.Kind() { - case reflect.Interface: - return interfaceType, true - case reflect.Func: - return ntype, true + found := false + for t != nil && t.Kind() == reflect.Ptr { + e := t.Elem() + switch e.Kind() { + case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice: + return t, false + default: + found = true + t = e + } } - - return nil, false + return t, found } func isIntegerOrArithmeticOperation(node ast.Node) bool { diff --git a/vendor/github.com/antonmedv/expr/compiler/compiler.go b/vendor/github.com/antonmedv/expr/compiler/compiler.go index 36ac92f233..3cd32af0f2 100644 --- a/vendor/github.com/antonmedv/expr/compiler/compiler.go +++ b/vendor/github.com/antonmedv/expr/compiler/compiler.go @@ -1,9 +1,7 @@ package compiler import ( - "encoding/binary" "fmt" - "math" "reflect" "github.com/antonmedv/expr/ast" @@ -11,6 +9,11 @@ import ( "github.com/antonmedv/expr/file" "github.com/antonmedv/expr/parser" . 
"github.com/antonmedv/expr/vm" + "github.com/antonmedv/expr/vm/runtime" +) + +const ( + placeholder = 12345 ) func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err error) { @@ -21,8 +24,9 @@ func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err erro }() c := &compiler{ - index: make(map[interface{}]uint16), - locations: make(map[int]file.Location), + locations: make([]file.Location, 0), + constantsIndex: make(map[interface{}]int), + functionsIndex: make(map[string]int), } if config != nil { @@ -33,87 +37,115 @@ func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err erro c.compile(tree.Node) switch c.cast { + case reflect.Int: + c.emit(OpCast, 0) case reflect.Int64: - c.emit(OpCast, encode(0)...) + c.emit(OpCast, 1) case reflect.Float64: - c.emit(OpCast, encode(1)...) + c.emit(OpCast, 2) } program = &Program{ + Node: tree.Node, Source: tree.Source, Locations: c.locations, Constants: c.constants, Bytecode: c.bytecode, + Arguments: c.arguments, + Functions: c.functions, } return } type compiler struct { - locations map[int]file.Location - constants []interface{} - bytecode []byte - index map[interface{}]uint16 - mapEnv bool - cast reflect.Kind - nodes []ast.Node -} - -func (c *compiler) emit(op byte, b ...byte) int { + locations []file.Location + bytecode []Opcode + constants []interface{} + constantsIndex map[interface{}]int + functions []Function + functionsIndex map[string]int + mapEnv bool + cast reflect.Kind + nodes []ast.Node + chains [][]int + arguments []int +} + +func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int { c.bytecode = append(c.bytecode, op) current := len(c.bytecode) - c.bytecode = append(c.bytecode, b...) + c.arguments = append(c.arguments, arg) + c.locations = append(c.locations, loc) + return current +} +func (c *compiler) emit(op Opcode, args ...int) int { + arg := 0 + if len(args) > 1 { + panic("too many arguments") + } + if len(args) == 1 { + arg = args[0] + } var loc file.Location if len(c.nodes) > 0 { loc = c.nodes[len(c.nodes)-1].Location() } - c.locations[current-1] = loc - - return current + return c.emitLocation(loc, op, arg) } func (c *compiler) emitPush(value interface{}) int { - return c.emit(OpPush, c.makeConstant(value)...) 
+ return c.emit(OpPush, c.addConstant(value)) } -func (c *compiler) makeConstant(i interface{}) []byte { - hashable := true - switch reflect.TypeOf(i).Kind() { - case reflect.Slice, reflect.Map: - hashable = false +func (c *compiler) addConstant(constant interface{}) int { + indexable := true + hash := constant + switch reflect.TypeOf(constant).Kind() { + case reflect.Slice, reflect.Map, reflect.Struct: + indexable = false } - - if hashable { - if p, ok := c.index[i]; ok { - return encode(p) - } + if field, ok := constant.(*runtime.Field); ok { + indexable = true + hash = fmt.Sprintf("%v", field) } - - c.constants = append(c.constants, i) - if len(c.constants) > math.MaxUint16 { - panic("exceeded constants max space limit") + if method, ok := constant.(*runtime.Method); ok { + indexable = true + hash = fmt.Sprintf("%v", method) } - - p := uint16(len(c.constants) - 1) - if hashable { - c.index[i] = p + if indexable { + if p, ok := c.constantsIndex[hash]; ok { + return p + } + } + c.constants = append(c.constants, constant) + p := len(c.constants) - 1 + if indexable { + c.constantsIndex[hash] = p } - return encode(p) + return p } -func (c *compiler) placeholder() []byte { - return []byte{0xFF, 0xFF} +func (c *compiler) addFunction(node *ast.CallNode) int { + if node.Func == nil { + panic("function is nil") + } + if p, ok := c.functionsIndex[node.Func.Name]; ok { + return p + } + p := len(c.functions) + c.functions = append(c.functions, node.Func.Func) + c.functionsIndex[node.Func.Name] = p + return p } func (c *compiler) patchJump(placeholder int) { - offset := len(c.bytecode) - 2 - placeholder - b := encode(uint16(offset)) - c.bytecode[placeholder] = b[0] - c.bytecode[placeholder+1] = b[1] + offset := len(c.bytecode) - placeholder + c.arguments[placeholder-1] = offset } -func (c *compiler) calcBackwardJump(to int) []byte { - return encode(uint16(len(c.bytecode) + 1 + 2 - to)) +func (c *compiler) calcBackwardJump(to int) int { + return len(c.bytecode) + 1 - to } func (c *compiler) compile(node ast.Node) { @@ -141,18 +173,14 @@ func (c *compiler) compile(node ast.Node) { c.UnaryNode(n) case *ast.BinaryNode: c.BinaryNode(n) - case *ast.MatchesNode: - c.MatchesNode(n) - case *ast.PropertyNode: - c.PropertyNode(n) - case *ast.IndexNode: - c.IndexNode(n) + case *ast.ChainNode: + c.ChainNode(n) + case *ast.MemberNode: + c.MemberNode(n) case *ast.SliceNode: c.SliceNode(n) - case *ast.MethodNode: - c.MethodNode(n) - case *ast.FunctionNode: - c.FunctionNode(n) + case *ast.CallNode: + c.CallNode(n) case *ast.BuiltinNode: c.BuiltinNode(n) case *ast.ClosureNode: @@ -172,18 +200,30 @@ func (c *compiler) compile(node ast.Node) { } } -func (c *compiler) NilNode(node *ast.NilNode) { +func (c *compiler) NilNode(_ *ast.NilNode) { c.emit(OpNil) } func (c *compiler) IdentifierNode(node *ast.IdentifierNode) { - v := c.makeConstant(node.Value) if c.mapEnv { - c.emit(OpFetchMap, v...) - } else if node.NilSafe { - c.emit(OpFetchNilSafe, v...) + c.emit(OpLoadFast, c.addConstant(node.Value)) + } else if len(node.FieldIndex) > 0 { + c.emit(OpLoadField, c.addConstant(&runtime.Field{ + Index: node.FieldIndex, + Path: []string{node.Value}, + })) + } else if node.Method { + c.emit(OpLoadMethod, c.addConstant(&runtime.Method{ + Name: node.Value, + Index: node.MethodIndex, + })) } else { - c.emit(OpFetch, v...) 
+ c.emit(OpLoadConst, c.addConstant(node.Value)) + } + if node.Deref { + c.emit(OpDeref) + } else if node.Type() == nil { + c.emit(OpDeref) } } @@ -193,15 +233,13 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) { c.emitPush(node.Value) return } - switch t.Kind() { case reflect.Float32: c.emitPush(float32(node.Value)) case reflect.Float64: c.emitPush(float64(node.Value)) - case reflect.Int: - c.emitPush(int(node.Value)) + c.emitPush(node.Value) case reflect.Int8: c.emitPush(int8(node.Value)) case reflect.Int16: @@ -210,7 +248,6 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) { c.emitPush(int32(node.Value)) case reflect.Int64: c.emitPush(int64(node.Value)) - case reflect.Uint: c.emitPush(uint(node.Value)) case reflect.Uint8: @@ -221,7 +258,6 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) { c.emitPush(uint32(node.Value)) case reflect.Uint64: c.emitPush(uint64(node.Value)) - default: c.emitPush(node.Value) } @@ -291,29 +327,18 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { case "or", "||": c.compile(node.Left) - end := c.emit(OpJumpIfTrue, c.placeholder()...) + end := c.emit(OpJumpIfTrue, placeholder) c.emit(OpPop) c.compile(node.Right) c.patchJump(end) case "and", "&&": c.compile(node.Left) - end := c.emit(OpJumpIfFalse, c.placeholder()...) + end := c.emit(OpJumpIfFalse, placeholder) c.emit(OpPop) c.compile(node.Right) c.patchJump(end) - case "in": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpIn) - - case "not in": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpIn) - c.emit(OpNot) - case "<": c.compile(node.Left) c.compile(node.Right) @@ -359,11 +384,26 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { c.compile(node.Right) c.emit(OpModulo) - case "**": + case "**", "^": c.compile(node.Left) c.compile(node.Right) c.emit(OpExponent) + case "in": + c.compile(node.Left) + c.compile(node.Right) + c.emit(OpIn) + + case "matches": + if node.Regexp != nil { + c.compile(node.Left) + c.emit(OpMatchesConst, c.addConstant(node.Regexp)) + } else { + c.compile(node.Left) + c.compile(node.Right) + c.emit(OpMatches) + } + case "contains": c.compile(node.Left) c.compile(node.Right) @@ -384,36 +424,94 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { c.compile(node.Right) c.emit(OpRange) + case "??": + c.compile(node.Left) + end := c.emit(OpJumpIfNotNil, placeholder) + c.emit(OpPop) + c.compile(node.Right) + c.patchJump(end) + default: panic(fmt.Sprintf("unknown operator (%v)", node.Operator)) } } -func (c *compiler) MatchesNode(node *ast.MatchesNode) { - if node.Regexp != nil { - c.compile(node.Left) - c.emit(OpMatchesConst, c.makeConstant(node.Regexp)...) - return +func (c *compiler) ChainNode(node *ast.ChainNode) { + c.chains = append(c.chains, []int{}) + c.compile(node.Node) + // Chain activate (got nit somewhere) + for _, ph := range c.chains[len(c.chains)-1] { + c.patchJump(ph) } - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpMatches) + c.chains = c.chains[:len(c.chains)-1] } -func (c *compiler) PropertyNode(node *ast.PropertyNode) { - c.compile(node.Node) - if !node.NilSafe { - c.emit(OpProperty, c.makeConstant(node.Property)...) 
+func (c *compiler) MemberNode(node *ast.MemberNode) { + if node.Method { + c.compile(node.Node) + c.emit(OpMethod, c.addConstant(&runtime.Method{ + Name: node.Name, + Index: node.MethodIndex, + })) + return + } + op := OpFetch + original := node + index := node.FieldIndex + path := []string{node.Name} + base := node.Node + if len(node.FieldIndex) > 0 { + op = OpFetchField + for !node.Optional { + ident, ok := base.(*ast.IdentifierNode) + if ok && len(ident.FieldIndex) > 0 { + if ident.Deref { + panic("IdentifierNode should not be dereferenced") + } + index = append(ident.FieldIndex, index...) + path = append([]string{ident.Value}, path...) + c.emitLocation(ident.Location(), OpLoadField, c.addConstant( + &runtime.Field{Index: index, Path: path}, + )) + goto deref + } + member, ok := base.(*ast.MemberNode) + if ok && len(member.FieldIndex) > 0 { + if member.Deref { + panic("MemberNode should not be dereferenced") + } + index = append(member.FieldIndex, index...) + path = append([]string{member.Name}, path...) + node = member + base = member.Node + } else { + break + } + } + } + + c.compile(base) + if node.Optional { + ph := c.emit(OpJumpIfNil, placeholder) + c.chains[len(c.chains)-1] = append(c.chains[len(c.chains)-1], ph) + } + + if op == OpFetch { + c.compile(node.Property) + c.emit(OpFetch) } else { - c.emit(OpPropertyNilSafe, c.makeConstant(node.Property)...) + c.emitLocation(node.Location(), op, c.addConstant( + &runtime.Field{Index: index, Path: path}, + )) } -} -func (c *compiler) IndexNode(node *ast.IndexNode) { - c.compile(node.Node) - c.compile(node.Index) - c.emit(OpIndex) +deref: + if original.Deref { + c.emit(OpDeref) + } else if original.Type() == nil { + c.emit(OpDeref) + } } func (c *compiler) SliceNode(node *ast.SliceNode) { @@ -431,44 +529,50 @@ func (c *compiler) SliceNode(node *ast.SliceNode) { c.emit(OpSlice) } -func (c *compiler) MethodNode(node *ast.MethodNode) { - c.compile(node.Node) +func (c *compiler) CallNode(node *ast.CallNode) { for _, arg := range node.Arguments { c.compile(arg) } - if !node.NilSafe { - c.emit(OpMethod, c.makeConstant(Call{Name: node.Method, Size: len(node.Arguments)})...) - } else { - c.emit(OpMethodNilSafe, c.makeConstant(Call{Name: node.Method, Size: len(node.Arguments)})...) - } -} - -func (c *compiler) FunctionNode(node *ast.FunctionNode) { - for _, arg := range node.Arguments { - c.compile(arg) + if node.Func != nil { + if node.Func.Opcode > 0 { + c.emit(OpBuiltin, node.Func.Opcode) + return + } + switch len(node.Arguments) { + case 0: + c.emit(OpCall0, c.addFunction(node)) + case 1: + c.emit(OpCall1, c.addFunction(node)) + case 2: + c.emit(OpCall2, c.addFunction(node)) + case 3: + c.emit(OpCall3, c.addFunction(node)) + default: + c.emit(OpLoadFunc, c.addFunction(node)) + c.emit(OpCallN, len(node.Arguments)) + } + return } - op := OpCall - if node.Fast { - op = OpCallFast + c.compile(node.Callee) + if node.Typed > 0 { + c.emit(OpCallTyped, node.Typed) + return + } else if node.Fast { + c.emit(OpCallFast, len(node.Arguments)) + } else { + c.emit(OpCall, len(node.Arguments)) } - c.emit(op, c.makeConstant(Call{Name: node.Name, Size: len(node.Arguments)})...) } func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { switch node.Name { - case "len": - c.compile(node.Arguments[0]) - c.emit(OpLen) - c.emit(OpRot) - c.emit(OpPop) - case "all": c.compile(node.Arguments[0]) c.emit(OpBegin) var loopBreak int c.emitLoop(func() { c.compile(node.Arguments[1]) - loopBreak = c.emit(OpJumpIfFalse, c.placeholder()...) 
+ loopBreak = c.emit(OpJumpIfFalse, placeholder) c.emit(OpPop) }) c.emit(OpTrue) @@ -482,7 +586,7 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { c.emitLoop(func() { c.compile(node.Arguments[1]) c.emit(OpNot) - loopBreak = c.emit(OpJumpIfFalse, c.placeholder()...) + loopBreak = c.emit(OpJumpIfFalse, placeholder) c.emit(OpPop) }) c.emit(OpTrue) @@ -495,7 +599,7 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { var loopBreak int c.emitLoop(func() { c.compile(node.Arguments[1]) - loopBreak = c.emit(OpJumpIfTrue, c.placeholder()...) + loopBreak = c.emit(OpJumpIfTrue, placeholder) c.emit(OpPop) }) c.emit(OpFalse) @@ -503,65 +607,53 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { c.emit(OpEnd) case "one": - count := c.makeConstant("count") c.compile(node.Arguments[0]) c.emit(OpBegin) - c.emitPush(0) - c.emit(OpStore, count...) c.emitLoop(func() { c.compile(node.Arguments[1]) c.emitCond(func() { - c.emit(OpInc, count...) + c.emit(OpIncrementCount) }) }) - c.emit(OpLoad, count...) + c.emit(OpGetCount) c.emitPush(1) c.emit(OpEqual) c.emit(OpEnd) case "filter": - count := c.makeConstant("count") c.compile(node.Arguments[0]) c.emit(OpBegin) - c.emitPush(0) - c.emit(OpStore, count...) c.emitLoop(func() { c.compile(node.Arguments[1]) c.emitCond(func() { - c.emit(OpInc, count...) - - c.emit(OpLoad, c.makeConstant("array")...) - c.emit(OpLoad, c.makeConstant("i")...) - c.emit(OpIndex) + c.emit(OpIncrementCount) + c.emit(OpPointer) }) }) - c.emit(OpLoad, count...) + c.emit(OpGetCount) c.emit(OpEnd) c.emit(OpArray) case "map": c.compile(node.Arguments[0]) c.emit(OpBegin) - size := c.emitLoop(func() { + c.emitLoop(func() { c.compile(node.Arguments[1]) }) - c.emit(OpLoad, size...) + c.emit(OpGetLen) c.emit(OpEnd) c.emit(OpArray) case "count": - count := c.makeConstant("count") c.compile(node.Arguments[0]) c.emit(OpBegin) - c.emitPush(0) - c.emit(OpStore, count...) c.emitLoop(func() { c.compile(node.Arguments[1]) c.emitCond(func() { - c.emit(OpInc, count...) + c.emit(OpIncrementCount) }) }) - c.emit(OpLoad, count...) + c.emit(OpGetCount) c.emit(OpEnd) default: @@ -570,44 +662,26 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { } func (c *compiler) emitCond(body func()) { - noop := c.emit(OpJumpIfFalse, c.placeholder()...) + noop := c.emit(OpJumpIfFalse, placeholder) c.emit(OpPop) body() - jmp := c.emit(OpJump, c.placeholder()...) + jmp := c.emit(OpJump, placeholder) c.patchJump(noop) c.emit(OpPop) c.patchJump(jmp) } -func (c *compiler) emitLoop(body func()) []byte { - i := c.makeConstant("i") - size := c.makeConstant("size") - array := c.makeConstant("array") - - c.emit(OpLen) - c.emit(OpStore, size...) - c.emit(OpStore, array...) - c.emitPush(0) - c.emit(OpStore, i...) - - cond := len(c.bytecode) - c.emit(OpLoad, i...) - c.emit(OpLoad, size...) - c.emit(OpLess) - end := c.emit(OpJumpIfFalse, c.placeholder()...) - c.emit(OpPop) +func (c *compiler) emitLoop(body func()) { + begin := len(c.bytecode) + end := c.emit(OpJumpIfEnd, placeholder) body() - c.emit(OpInc, i...) - c.emit(OpJumpBackward, c.calcBackwardJump(cond)...) - + c.emit(OpIncrementIt) + c.emit(OpJumpBackward, c.calcBackwardJump(begin)) c.patchJump(end) - c.emit(OpPop) - - return size } func (c *compiler) ClosureNode(node *ast.ClosureNode) { @@ -615,18 +689,16 @@ func (c *compiler) ClosureNode(node *ast.ClosureNode) { } func (c *compiler) PointerNode(node *ast.PointerNode) { - c.emit(OpLoad, c.makeConstant("array")...) - c.emit(OpLoad, c.makeConstant("i")...) 
- c.emit(OpIndex) + c.emit(OpPointer) } func (c *compiler) ConditionalNode(node *ast.ConditionalNode) { c.compile(node.Cond) - otherwise := c.emit(OpJumpIfFalse, c.placeholder()...) + otherwise := c.emit(OpJumpIfFalse, placeholder) c.emit(OpPop) c.compile(node.Exp1) - end := c.emit(OpJump, c.placeholder()...) + end := c.emit(OpJump, placeholder) c.patchJump(otherwise) c.emit(OpPop) @@ -658,12 +730,6 @@ func (c *compiler) PairNode(node *ast.PairNode) { c.compile(node.Value) } -func encode(i uint16) []byte { - b := make([]byte, 2) - binary.LittleEndian.PutUint16(b, i) - return b -} - func kind(node ast.Node) reflect.Kind { t := node.Type() if t == nil { diff --git a/vendor/github.com/antonmedv/expr/compiler/patcher.go b/vendor/github.com/antonmedv/expr/compiler/patcher.go deleted file mode 100644 index 2491dec9d4..0000000000 --- a/vendor/github.com/antonmedv/expr/compiler/patcher.go +++ /dev/null @@ -1,44 +0,0 @@ -package compiler - -import ( - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/conf" -) - -type operatorPatcher struct { - ops map[string][]string - types conf.TypesTable -} - -func (p *operatorPatcher) Enter(node *ast.Node) {} -func (p *operatorPatcher) Exit(node *ast.Node) { - binaryNode, ok := (*node).(*ast.BinaryNode) - if !ok { - return - } - - fns, ok := p.ops[binaryNode.Operator] - if !ok { - return - } - - leftType := binaryNode.Left.Type() - rightType := binaryNode.Right.Type() - - _, fn, ok := conf.FindSuitableOperatorOverload(fns, p.types, leftType, rightType) - if ok { - newNode := &ast.FunctionNode{ - Name: fn, - Arguments: []ast.Node{binaryNode.Left, binaryNode.Right}, - } - ast.Patch(node, newNode) - } -} - -func PatchOperators(node *ast.Node, config *conf.Config) { - if len(config.Operators) == 0 { - return - } - patcher := &operatorPatcher{ops: config.Operators, types: config.Types} - ast.Walk(node, patcher) -} diff --git a/vendor/github.com/antonmedv/expr/conf/config.go b/vendor/github.com/antonmedv/expr/conf/config.go index 7ba07fe0d5..1ac0fa7d29 100644 --- a/vendor/github.com/antonmedv/expr/conf/config.go +++ b/vendor/github.com/antonmedv/expr/conf/config.go @@ -5,24 +5,46 @@ import ( "reflect" "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/vm" + "github.com/antonmedv/expr/builtin" + "github.com/antonmedv/expr/vm/runtime" ) type Config struct { - Env interface{} - MapEnv bool - Types TypesTable - Operators OperatorsTable - Expect reflect.Kind - Optimize bool - Strict bool - DefaultType reflect.Type - ConstExprFns map[string]reflect.Value - Visitors []ast.Visitor - err error + Env interface{} + Types TypesTable + MapEnv bool + DefaultType reflect.Type + Operators OperatorsTable + Expect reflect.Kind + Optimize bool + Strict bool + ConstFns map[string]reflect.Value + Visitors []ast.Visitor + Functions map[string]*builtin.Function } +// CreateNew creates new config with default values. +func CreateNew() *Config { + c := &Config{ + Operators: make(map[string][]string), + ConstFns: make(map[string]reflect.Value), + Functions: make(map[string]*builtin.Function), + Optimize: true, + } + for _, f := range builtin.Builtins { + c.Functions[f.Name] = f + } + return c +} + +// New creates new config with environment. 
func New(env interface{}) *Config { + c := CreateNew() + c.WithEnv(env) + return c +} + +func (c *Config) WithEnv(env interface{}) { var mapEnv bool var mapValueType reflect.Type if _, ok := env.(map[string]interface{}); ok { @@ -33,57 +55,42 @@ func New(env interface{}) *Config { } } - return &Config{ - Env: env, - MapEnv: mapEnv, - Types: CreateTypesTable(env), - Optimize: true, - Strict: true, - DefaultType: mapValueType, - ConstExprFns: make(map[string]reflect.Value), + c.Env = env + c.Types = CreateTypesTable(env) + c.MapEnv = mapEnv + c.DefaultType = mapValueType + c.Strict = true +} + +func (c *Config) Operator(operator string, fns ...string) { + c.Operators[operator] = append(c.Operators[operator], fns...) +} + +func (c *Config) ConstExpr(name string) { + if c.Env == nil { + panic("no environment is specified for ConstExpr()") + } + fn := reflect.ValueOf(runtime.Fetch(c.Env, name)) + if fn.Kind() != reflect.Func { + panic(fmt.Errorf("const expression %q must be a function", name)) } + c.ConstFns[name] = fn } -// Check validates the compiler configuration. -func (c *Config) Check() error { - // Check that all functions that define operator overloading - // exist in environment and have correct signatures. - for op, fns := range c.Operators { +func (c *Config) Check() { + for operator, fns := range c.Operators { for _, fn := range fns { fnType, ok := c.Types[fn] if !ok || fnType.Type.Kind() != reflect.Func { - return fmt.Errorf("function %s for %s operator does not exist in environment", fn, op) + panic(fmt.Errorf("function %s for %s operator does not exist in the environment", fn, operator)) } requiredNumIn := 2 if fnType.Method { requiredNumIn = 3 // As first argument of method is receiver. } if fnType.Type.NumIn() != requiredNumIn || fnType.Type.NumOut() != 1 { - return fmt.Errorf("function %s for %s operator does not have a correct signature", fn, op) + panic(fmt.Errorf("function %s for %s operator does not have a correct signature", fn, operator)) } } } - - // Check that all ConstExprFns are functions. - for name, fn := range c.ConstExprFns { - if fn.Kind() != reflect.Func { - return fmt.Errorf("const expression %q must be a function", name) - } - } - - return c.err -} - -func (c *Config) ConstExpr(name string) { - if c.Env == nil { - c.Error(fmt.Errorf("no environment for const expression: %v", name)) - return - } - c.ConstExprFns[name] = vm.FetchFn(c.Env, name) -} - -func (c *Config) Error(err error) { - if c.err == nil { - c.err = err - } } diff --git a/vendor/github.com/antonmedv/expr/conf/functions.go b/vendor/github.com/antonmedv/expr/conf/functions.go new file mode 100644 index 0000000000..8f52a95575 --- /dev/null +++ b/vendor/github.com/antonmedv/expr/conf/functions.go @@ -0,0 +1 @@ +package conf diff --git a/vendor/github.com/antonmedv/expr/conf/operators_table.go b/vendor/github.com/antonmedv/expr/conf/operators.go similarity index 59% rename from vendor/github.com/antonmedv/expr/conf/operators_table.go rename to vendor/github.com/antonmedv/expr/conf/operators.go index 0ceb844002..13e069d76c 100644 --- a/vendor/github.com/antonmedv/expr/conf/operators_table.go +++ b/vendor/github.com/antonmedv/expr/conf/operators.go @@ -1,6 +1,10 @@ package conf -import "reflect" +import ( + "reflect" + + "github.com/antonmedv/expr/ast" +) // OperatorsTable maps binary operators to corresponding list of functions. // Functions should be provided in the environment to allow operator overloading. 
@@ -24,3 +28,32 @@ func FindSuitableOperatorOverload(fns []string, types TypesTable, l, r reflect.T } return nil, "", false } + +type OperatorPatcher struct { + Operators OperatorsTable + Types TypesTable +} + +func (p *OperatorPatcher) Visit(node *ast.Node) { + binaryNode, ok := (*node).(*ast.BinaryNode) + if !ok { + return + } + + fns, ok := p.Operators[binaryNode.Operator] + if !ok { + return + } + + leftType := binaryNode.Left.Type() + rightType := binaryNode.Right.Type() + + _, fn, ok := FindSuitableOperatorOverload(fns, p.Types, leftType, rightType) + if ok { + newNode := &ast.CallNode{ + Callee: &ast.IdentifierNode{Value: fn}, + Arguments: []ast.Node{binaryNode.Left, binaryNode.Right}, + } + ast.Patch(node, newNode) + } +} diff --git a/vendor/github.com/antonmedv/expr/conf/types_table.go b/vendor/github.com/antonmedv/expr/conf/types_table.go index d5539da4ae..e917f5fa84 100644 --- a/vendor/github.com/antonmedv/expr/conf/types_table.go +++ b/vendor/github.com/antonmedv/expr/conf/types_table.go @@ -1,11 +1,15 @@ package conf -import "reflect" +import ( + "reflect" +) type Tag struct { - Type reflect.Type - Method bool - Ambiguous bool + Type reflect.Type + Ambiguous bool + FieldIndex []int + Method bool + MethodIndex int } type TypesTable map[string]Tag @@ -39,7 +43,11 @@ func CreateTypesTable(i interface{}) TypesTable { // all embedded structs methods as well, no need to recursion. for i := 0; i < t.NumMethod(); i++ { m := t.Method(i) - types[m.Name] = Tag{Type: m.Type, Method: true} + types[m.Name] = Tag{ + Type: m.Type, + Method: true, + MethodIndex: i, + } } case reflect.Map: @@ -53,7 +61,11 @@ func CreateTypesTable(i interface{}) TypesTable { // A map may have method too. for i := 0; i < t.NumMethod(); i++ { m := t.Method(i) - types[m.Name] = Tag{Type: m.Type, Method: true} + types[m.Name] = Tag{ + Type: m.Type, + Method: true, + MethodIndex: i, + } } } @@ -77,12 +89,16 @@ func FieldsFromStruct(t reflect.Type) TypesTable { if _, ok := types[name]; ok { types[name] = Tag{Ambiguous: true} } else { + typ.FieldIndex = append(f.Index, typ.FieldIndex...) types[name] = typ } } } - types[f.Name] = Tag{Type: f.Type} + types[FieldName(f)] = Tag{ + Type: f.Type, + FieldIndex: f.Index, + } } } @@ -98,3 +114,10 @@ func dereference(t reflect.Type) reflect.Type { } return t } + +func FieldName(field reflect.StructField) string { + if taggedName := field.Tag.Get("expr"); taggedName != "" { + return taggedName + } + return field.Name +} diff --git a/vendor/github.com/antonmedv/expr/expr.go b/vendor/github.com/antonmedv/expr/expr.go index 05c54adb18..14f6af285c 100644 --- a/vendor/github.com/antonmedv/expr/expr.go +++ b/vendor/github.com/antonmedv/expr/expr.go @@ -2,13 +2,14 @@ package expr import ( "fmt" - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/file" "reflect" + "github.com/antonmedv/expr/ast" + "github.com/antonmedv/expr/builtin" "github.com/antonmedv/expr/checker" "github.com/antonmedv/expr/compiler" "github.com/antonmedv/expr/conf" + "github.com/antonmedv/expr/file" "github.com/antonmedv/expr/optimizer" "github.com/antonmedv/expr/parser" "github.com/antonmedv/expr/vm" @@ -17,30 +18,6 @@ import ( // Option for configuring config. type Option func(c *conf.Config) -// Eval parses, compiles and runs given input. 
-func Eval(input string, env interface{}) (interface{}, error) { - if _, ok := env.(Option); ok { - return nil, fmt.Errorf("misused expr.Eval: second argument (env) should be passed without expr.Env") - } - - tree, err := parser.Parse(input) - if err != nil { - return nil, err - } - - program, err := compiler.Compile(tree, nil) - if err != nil { - return nil, err - } - - output, err := vm.Run(program, env) - if err != nil { - return nil, err - } - - return output, nil -} - // Env specifies expected input of env for type checks. // If struct is passed, all fields will be treated as variables, // as well as all fields of embedded structs and struct itself. @@ -48,33 +25,22 @@ func Eval(input string, env interface{}) (interface{}, error) { // Methods defined on this type will be available as functions. func Env(env interface{}) Option { return func(c *conf.Config) { - if _, ok := env.(map[string]interface{}); ok { - c.MapEnv = true - } else { - if reflect.ValueOf(env).Kind() == reflect.Map { - c.DefaultType = reflect.TypeOf(env).Elem() - } - } - c.Strict = true - c.Types = conf.CreateTypesTable(env) - c.Env = env + c.WithEnv(env) } } // AllowUndefinedVariables allows to use undefined variables inside expressions. // This can be used with expr.Env option to partially define a few variables. -// Note what this option is only works in map environment are used, otherwise -// runtime.fetch will panic as there is no way to get missing field zero value. func AllowUndefinedVariables() Option { return func(c *conf.Config) { c.Strict = false } } -// Operator allows to override binary operator with function. +// Operator allows to replace a binary operator with a function. func Operator(operator string, fn ...string) Option { return func(c *conf.Config) { - c.Operators[operator] = append(c.Operators[operator], fn...) + c.Operator(operator, fn...) } } @@ -86,21 +52,35 @@ func ConstExpr(fn string) Option { } } -// AsBool tells the compiler to expect boolean result. +// AsKind tells the compiler to expect kind of the result. +func AsKind(kind reflect.Kind) Option { + return func(c *conf.Config) { + c.Expect = kind + } +} + +// AsBool tells the compiler to expect a boolean result. func AsBool() Option { return func(c *conf.Config) { c.Expect = reflect.Bool } } -// AsInt64 tells the compiler to expect int64 result. +// AsInt tells the compiler to expect an int result. +func AsInt() Option { + return func(c *conf.Config) { + c.Expect = reflect.Int + } +} + +// AsInt64 tells the compiler to expect an int64 result. func AsInt64() Option { return func(c *conf.Config) { c.Expect = reflect.Int64 } } -// AsFloat64 tells the compiler to expect float64 result. +// AsFloat64 tells the compiler to expect a float64 result. func AsFloat64() Option { return func(c *conf.Config) { c.Expect = reflect.Float64 @@ -121,20 +101,42 @@ func Patch(visitor ast.Visitor) Option { } } +// Function adds function to list of functions what will be available in expressions. +func Function(name string, fn func(params ...interface{}) (interface{}, error), types ...interface{}) Option { + return func(c *conf.Config) { + ts := make([]reflect.Type, len(types)) + for i, t := range types { + t := reflect.TypeOf(t) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Func { + panic(fmt.Sprintf("expr: type of %s is not a function", name)) + } + ts[i] = t + } + c.Functions[name] = &builtin.Function{ + Name: name, + Func: fn, + Types: ts, + } + } +} + // Compile parses and compiles given input expression to bytecode program. 
func Compile(input string, ops ...Option) (*vm.Program, error) { - config := &conf.Config{ - Operators: make(map[string][]string), - ConstExprFns: make(map[string]reflect.Value), - Optimize: true, - } + config := conf.CreateNew() for _, op := range ops { op(config) } + config.Check() - if err := config.Check(); err != nil { - return nil, err + if len(config.Operators) > 0 { + config.Visitors = append(config.Visitors, &conf.OperatorPatcher{ + Operators: config.Operators, + Types: config.Types, + }) } tree, err := parser.Parse(input) @@ -142,25 +144,22 @@ func Compile(input string, ops ...Option) (*vm.Program, error) { return nil, err } - _, err = checker.Check(tree, config) - - // If we have a patch to apply, it may fix out error and - // second type check is needed. Otherwise it is an error. - if err != nil && len(config.Visitors) == 0 { - return nil, err - } - - // Patch operators before Optimize, as we may also mark it as ConstExpr. - compiler.PatchOperators(&tree.Node, config) - - if len(config.Visitors) >= 0 { + if len(config.Visitors) > 0 { for _, v := range config.Visitors { + // We need to perform types check, because some visitors may rely on + // types information available in the tree. + _, _ = checker.Check(tree, config) ast.Walk(&tree.Node, v) } _, err = checker.Check(tree, config) if err != nil { return nil, err } + } else { + _, err = checker.Check(tree, config) + if err != nil { + return nil, err + } } if config.Optimize { @@ -185,3 +184,22 @@ func Compile(input string, ops ...Option) (*vm.Program, error) { func Run(program *vm.Program, env interface{}) (interface{}, error) { return vm.Run(program, env) } + +// Eval parses, compiles and runs given input. +func Eval(input string, env interface{}) (interface{}, error) { + if _, ok := env.(Option); ok { + return nil, fmt.Errorf("misused expr.Eval: second argument (env) should be passed without expr.Env") + } + + program, err := Compile(input) + if err != nil { + return nil, err + } + + output, err := Run(program, env) + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/vendor/github.com/antonmedv/expr/file/error.go b/vendor/github.com/antonmedv/expr/file/error.go index b7af3e6e24..1e7e81b947 100644 --- a/vendor/github.com/antonmedv/expr/file/error.go +++ b/vendor/github.com/antonmedv/expr/file/error.go @@ -10,6 +10,7 @@ type Error struct { Location Message string Snippet string + Prev error } func (e *Error) Error() string { @@ -44,6 +45,16 @@ func (e *Error) Bind(source *Source) *Error { return e } + +func (e *Error) Unwrap() error { + return e.Prev +} + +func (e *Error) Wrap(err error) { + e.Prev = err +} + + func (e *Error) format() string { if e.Location.Empty() { return e.Message diff --git a/vendor/github.com/antonmedv/expr/file/source.go b/vendor/github.com/antonmedv/expr/file/source.go index 185d1568ad..9ee297b580 100644 --- a/vendor/github.com/antonmedv/expr/file/source.go +++ b/vendor/github.com/antonmedv/expr/file/source.go @@ -74,22 +74,3 @@ func (s *Source) findLineOffset(line int) (int32, bool) { } return -1, false } - -// findLine finds the line that contains the given character offset and -// returns the line number and offset of the beginning of that line. -// Note that the last line is treated as if it contains all offsets -// beyond the end of the actual source. 
-func (s *Source) findLine(characterOffset int32) (int32, int32) { - var line int32 = 1 - for _, lineOffset := range s.lineOffsets { - if lineOffset > characterOffset { - break - } else { - line++ - } - } - if line == 1 { - return line, 0 - } - return line, s.lineOffsets[line-2] -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_expr.go b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go index 85fcc337f0..7ececb3dba 100644 --- a/vendor/github.com/antonmedv/expr/optimizer/const_expr.go +++ b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go @@ -2,20 +2,22 @@ package optimizer import ( "fmt" - . "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/file" "reflect" "strings" + + . "github.com/antonmedv/expr/ast" + "github.com/antonmedv/expr/file" ) +var errorType = reflect.TypeOf((*error)(nil)).Elem() + type constExpr struct { applied bool err error fns map[string]reflect.Value } -func (*constExpr) Enter(*Node) {} -func (c *constExpr) Exit(node *Node) { +func (c *constExpr) Visit(node *Node) { defer func() { if r := recover(); r != nil { msg := fmt.Sprintf("%v", r) @@ -33,45 +35,51 @@ func (c *constExpr) Exit(node *Node) { Patch(node, newNode) } - switch n := (*node).(type) { - case *FunctionNode: - fn, ok := c.fns[n.Name] - if ok { - in := make([]reflect.Value, len(n.Arguments)) - for i := 0; i < len(n.Arguments); i++ { - arg := n.Arguments[i] - var param interface{} + if call, ok := (*node).(*CallNode); ok { + if name, ok := call.Callee.(*IdentifierNode); ok { + fn, ok := c.fns[name.Value] + if ok { + in := make([]reflect.Value, len(call.Arguments)) + for i := 0; i < len(call.Arguments); i++ { + arg := call.Arguments[i] + var param interface{} - switch a := arg.(type) { - case *NilNode: - param = nil - case *IntegerNode: - param = a.Value - case *FloatNode: - param = a.Value - case *BoolNode: - param = a.Value - case *StringNode: - param = a.Value - case *ConstantNode: - param = a.Value + switch a := arg.(type) { + case *NilNode: + param = nil + case *IntegerNode: + param = a.Value + case *FloatNode: + param = a.Value + case *BoolNode: + param = a.Value + case *StringNode: + param = a.Value + case *ConstantNode: + param = a.Value - default: - return // Const expr optimization not applicable. + default: + return // Const expr optimization not applicable. + } + + if param == nil && reflect.TypeOf(param) == nil { + // In case of nil value and nil type use this hack, + // otherwise reflect.Call will panic on zero value. + in[i] = reflect.ValueOf(¶m).Elem() + } else { + in[i] = reflect.ValueOf(param) + } } - if param == nil && reflect.TypeOf(param) == nil { - // In case of nil value and nil type use this hack, - // otherwise reflect.Call will panic on zero value. 
- in[i] = reflect.ValueOf(¶m).Elem() - } else { - in[i] = reflect.ValueOf(param) + out := fn.Call(in) + value := out[0].Interface() + if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() { + c.err = out[1].Interface().(error) + return } + constNode := &ConstantNode{Value: value} + patch(constNode) } - - out := fn.Call(in) - constNode := &ConstantNode{Value: out[0].Interface()} - patch(constNode) } } } diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_range.go b/vendor/github.com/antonmedv/expr/optimizer/const_range.go index 5205aa14fa..26d6d6f571 100644 --- a/vendor/github.com/antonmedv/expr/optimizer/const_range.go +++ b/vendor/github.com/antonmedv/expr/optimizer/const_range.go @@ -6,8 +6,7 @@ import ( type constRange struct{} -func (*constRange) Enter(*Node) {} -func (*constRange) Exit(node *Node) { +func (*constRange) Visit(node *Node) { switch n := (*node).(type) { case *BinaryNode: if n.Operator == ".." { diff --git a/vendor/github.com/antonmedv/expr/optimizer/fold.go b/vendor/github.com/antonmedv/expr/optimizer/fold.go index 666912541e..b62b2d7ed4 100644 --- a/vendor/github.com/antonmedv/expr/optimizer/fold.go +++ b/vendor/github.com/antonmedv/expr/optimizer/fold.go @@ -13,8 +13,7 @@ type fold struct { err *file.Error } -func (*fold) Enter(*Node) {} -func (fold *fold) Exit(node *Node) { +func (fold *fold) Visit(node *Node) { patch := func(newNode Node) { fold.applied = true Patch(node, newNode) @@ -33,48 +32,145 @@ func (fold *fold) Exit(node *Node) { if i, ok := n.Node.(*IntegerNode); ok { patchWithType(&IntegerNode{Value: -i.Value}, n.Node.Type()) } + if i, ok := n.Node.(*FloatNode); ok { + patchWithType(&FloatNode{Value: -i.Value}, n.Node.Type()) + } case "+": if i, ok := n.Node.(*IntegerNode); ok { patchWithType(&IntegerNode{Value: i.Value}, n.Node.Type()) } + if i, ok := n.Node.(*FloatNode); ok { + patchWithType(&FloatNode{Value: i.Value}, n.Node.Type()) + } + case "!", "not": + if a := toBool(n.Node); a != nil { + patch(&BoolNode{Value: !a.Value}) + } } case *BinaryNode: switch n.Operator { case "+": - if a, ok := n.Left.(*IntegerNode); ok { - if b, ok := n.Right.(*IntegerNode); ok { + { + a := toInteger(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { patchWithType(&IntegerNode{Value: a.Value + b.Value}, a.Type()) } } - if a, ok := n.Left.(*StringNode); ok { - if b, ok := n.Right.(*StringNode); ok { + { + a := toInteger(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: float64(a.Value) + b.Value}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value + float64(b.Value)}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value + b.Value}, a.Type()) + } + } + { + a := toString(n.Left) + b := toString(n.Right) + if a != nil && b != nil { patch(&StringNode{Value: a.Value + b.Value}) } } case "-": - if a, ok := n.Left.(*IntegerNode); ok { - if b, ok := n.Right.(*IntegerNode); ok { + { + a := toInteger(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { patchWithType(&IntegerNode{Value: a.Value - b.Value}, a.Type()) } } + { + a := toInteger(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: float64(a.Value) - b.Value}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value - float64(b.Value)}, 
a.Type()) + } + } + { + a := toFloat(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value - b.Value}, a.Type()) + } + } case "*": - if a, ok := n.Left.(*IntegerNode); ok { - if b, ok := n.Right.(*IntegerNode); ok { + { + a := toInteger(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { patchWithType(&IntegerNode{Value: a.Value * b.Value}, a.Type()) } } + { + a := toInteger(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: float64(a.Value) * b.Value}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value * float64(b.Value)}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value * b.Value}, a.Type()) + } + } case "/": - if a, ok := n.Left.(*IntegerNode); ok { - if b, ok := n.Right.(*IntegerNode); ok { - if b.Value == 0 { - fold.err = &file.Error{ - Location: (*node).Location(), - Message: "integer divide by zero", - } - return - } - patchWithType(&IntegerNode{Value: a.Value / b.Value}, a.Type()) + { + a := toInteger(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: float64(a.Value) / float64(b.Value)}, a.Type()) + } + } + { + a := toInteger(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: float64(a.Value) / b.Value}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value / float64(b.Value)}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: a.Value / b.Value}, a.Type()) } } case "%": @@ -90,44 +186,158 @@ func (fold *fold) Exit(node *Node) { patch(&IntegerNode{Value: a.Value % b.Value}) } } - case "**": - if a, ok := n.Left.(*IntegerNode); ok { - if b, ok := n.Right.(*IntegerNode); ok { - patch(&FloatNode{Value: math.Pow(float64(a.Value), float64(b.Value))}) + case "**", "^": + { + a := toInteger(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), float64(b.Value))}, a.Type()) } } - } + { + a := toInteger(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), b.Value)}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: math.Pow(a.Value, float64(b.Value))}, a.Type()) + } + } + { + a := toFloat(n.Left) + b := toFloat(n.Right) + if a != nil && b != nil { + patchWithType(&FloatNode{Value: math.Pow(a.Value, b.Value)}, a.Type()) + } + } + case "and", "&&": + a := toBool(n.Left) + b := toBool(n.Right) - case *ArrayNode: - if len(n.Nodes) > 0 { + if a != nil && a.Value { // true and x + patch(n.Right) + } else if b != nil && b.Value { // x and true + patch(n.Left) + } else if (a != nil && !a.Value) || (b != nil && !b.Value) { // "x and false" or "false and x" + patch(&BoolNode{Value: false}) + } + case "or", "||": + a := toBool(n.Left) + b := toBool(n.Right) - for _, a := range n.Nodes { - if _, ok := a.(*IntegerNode); !ok { - goto string + if a != nil && !a.Value { // false or x + patch(n.Right) + } else if b != nil && !b.Value { // x or false + patch(n.Left) + } else if (a != nil && a.Value) || (b != nil && b.Value) { // "x or true" or "true or x" + 
patch(&BoolNode{Value: true}) + } + case "==": + { + a := toInteger(n.Left) + b := toInteger(n.Right) + if a != nil && b != nil { + patch(&BoolNode{Value: a.Value == b.Value}) } } { - value := make([]int, len(n.Nodes)) - for i, a := range n.Nodes { - value[i] = a.(*IntegerNode).Value + a := toString(n.Left) + b := toString(n.Right) + if a != nil && b != nil { + patch(&BoolNode{Value: a.Value == b.Value}) } - patch(&ConstantNode{Value: value}) } + { + a := toBool(n.Left) + b := toBool(n.Right) + if a != nil && b != nil { + patch(&BoolNode{Value: a.Value == b.Value}) + } + } + } - string: + case *ArrayNode: + if len(n.Nodes) > 0 { for _, a := range n.Nodes { - if _, ok := a.(*StringNode); !ok { + switch a.(type) { + case *IntegerNode, *FloatNode, *StringNode, *BoolNode: + continue + default: return } } - { - value := make([]string, len(n.Nodes)) - for i, a := range n.Nodes { - value[i] = a.(*StringNode).Value + value := make([]interface{}, len(n.Nodes)) + for i, a := range n.Nodes { + switch b := a.(type) { + case *IntegerNode: + value[i] = b.Value + case *FloatNode: + value[i] = b.Value + case *StringNode: + value[i] = b.Value + case *BoolNode: + value[i] = b.Value } - patch(&ConstantNode{Value: value}) } + patch(&ConstantNode{Value: value}) + } + case *BuiltinNode: + switch n.Name { + case "filter": + if len(n.Arguments) != 2 { + return + } + if base, ok := n.Arguments[0].(*BuiltinNode); ok && base.Name == "filter" { + patch(&BuiltinNode{ + Name: "filter", + Arguments: []Node{ + base.Arguments[0], + &BinaryNode{ + Operator: "&&", + Left: base.Arguments[1], + Right: n.Arguments[1], + }, + }, + }) + } } } } + +func toString(n Node) *StringNode { + switch a := n.(type) { + case *StringNode: + return a + } + return nil +} + +func toInteger(n Node) *IntegerNode { + switch a := n.(type) { + case *IntegerNode: + return a + } + return nil +} + +func toFloat(n Node) *FloatNode { + switch a := n.(type) { + case *FloatNode: + return a + } + return nil +} + +func toBool(n Node) *BoolNode { + switch a := n.(type) { + case *BoolNode: + return a + } + return nil +} diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_array.go b/vendor/github.com/antonmedv/expr/optimizer/in_array.go index 8156faa5e0..a51957631c 100644 --- a/vendor/github.com/antonmedv/expr/optimizer/in_array.go +++ b/vendor/github.com/antonmedv/expr/optimizer/in_array.go @@ -8,11 +8,10 @@ import ( type inArray struct{} -func (*inArray) Enter(*Node) {} -func (*inArray) Exit(node *Node) { +func (*inArray) Visit(node *Node) { switch n := (*node).(type) { case *BinaryNode: - if n.Operator == "in" || n.Operator == "not in" { + if n.Operator == "in" { if array, ok := n.Right.(*ArrayNode); ok { if len(array.Nodes) > 0 { t := n.Left.Type() diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_range.go b/vendor/github.com/antonmedv/expr/optimizer/in_range.go index 177c91933d..7895249e0b 100644 --- a/vendor/github.com/antonmedv/expr/optimizer/in_range.go +++ b/vendor/github.com/antonmedv/expr/optimizer/in_range.go @@ -6,11 +6,10 @@ import ( type inRange struct{} -func (*inRange) Enter(*Node) {} -func (*inRange) Exit(node *Node) { +func (*inRange) Visit(node *Node) { switch n := (*node).(type) { case *BinaryNode: - if n.Operator == "in" || n.Operator == "not in" { + if n.Operator == "in" { if rng, ok := n.Right.(*BinaryNode); ok && rng.Operator == ".." 
{ if from, ok := rng.Left.(*IntegerNode); ok { if to, ok := rng.Right.(*IntegerNode); ok { @@ -27,12 +26,6 @@ func (*inRange) Exit(node *Node) { Right: to, }, }) - if n.Operator == "not in" { - Patch(node, &UnaryNode{ - Operator: "not", - Node: *node, - }) - } } } } diff --git a/vendor/github.com/antonmedv/expr/optimizer/optimizer.go b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go index 738348dc36..9c97496c8d 100644 --- a/vendor/github.com/antonmedv/expr/optimizer/optimizer.go +++ b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go @@ -17,10 +17,10 @@ func Optimize(node *Node, config *conf.Config) error { break } } - if config != nil && len(config.ConstExprFns) > 0 { + if config != nil && len(config.ConstFns) > 0 { for limit := 100; limit >= 0; limit-- { constExpr := &constExpr{ - fns: config.ConstExprFns, + fns: config.ConstFns, } Walk(node, constExpr) if constExpr.err != nil { diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go index 6e4848a834..cfb1e8c61b 100644 --- a/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go +++ b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go @@ -95,6 +95,11 @@ func (l *lexer) emitEOF() { l.startLoc = l.loc } +func (l *lexer) skip() { + l.start = l.end + l.startLoc = l.loc +} + func (l *lexer) word() string { return l.input[l.start:l.end] } @@ -118,14 +123,18 @@ func (l *lexer) acceptRun(valid string) { l.backup() } -func (l *lexer) acceptWord(word string) bool { - pos, loc, prev := l.end, l.loc, l.prev - - // Skip spaces (U+0020) if any +func (l *lexer) skipSpaces() { r := l.peek() for ; r == ' '; r = l.peek() { l.next() } + l.skip() +} + +func (l *lexer) acceptWord(word string) bool { + pos, loc, prev := l.end, l.loc, l.prev + + l.skipSpaces() for _, ch := range word { if l.next() != ch { @@ -133,7 +142,7 @@ func (l *lexer) acceptWord(word string) bool { return false } } - if r = l.peek(); r != ' ' && r != eof { + if r := l.peek(); r != ' ' && r != eof { l.end, l.loc, l.prev = pos, loc, prev return false } diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/state.go b/vendor/github.com/antonmedv/expr/parser/lexer/state.go index 0d4bece4b7..1212aa3217 100644 --- a/vendor/github.com/antonmedv/expr/parser/lexer/state.go +++ b/vendor/github.com/antonmedv/expr/parser/lexer/state.go @@ -25,15 +25,14 @@ func root(l *lexer) stateFn { l.backup() return number case r == '?': - if l.peek() == '.' { - return nilsafe - } - l.emit(Operator) + return questionMark + case r == '/': + return slash case strings.ContainsRune("([{", r): l.emit(Bracket) case strings.ContainsRune(")]}", r): l.emit(Bracket) - case strings.ContainsRune("#,?:%+-/", r): // single rune operator + case strings.ContainsRune("#,:%+-^", r): // single rune operator l.emit(Operator) case strings.ContainsRune("&|!=*<>", r): // possible double rune operator l.accept("&|=*") @@ -107,13 +106,6 @@ func dot(l *lexer) stateFn { return root } -func nilsafe(l *lexer) stateFn { - l.next() - l.accept("?.") - l.emit(Operator) - return root -} - func identifier(l *lexer) stateFn { loop: for { @@ -137,12 +129,70 @@ loop: } func not(l *lexer) stateFn { - switch l.acceptWord("in") { - case true: - l.emitValue(Operator, "not in") - case false: - l.emitValue(Operator, "not") + l.emit(Operator) + + l.skipSpaces() + + pos, loc, prev := l.end, l.loc, l.prev + + // Get the next word. 
+ for { + r := l.next() + if IsAlphaNumeric(r) { + // absorb + } else { + l.backup() + break + } + } + + switch l.word() { + case "in", "matches", "contains", "startsWith", "endsWith": + l.emit(Operator) + default: + l.end, l.loc, l.prev = pos, loc, prev + } + return root +} + +func questionMark(l *lexer) stateFn { + l.accept(".?") + l.emit(Operator) + return root +} + +func slash(l *lexer) stateFn { + if l.accept("/") { + return singleLineComment + } + if l.accept("*") { + return multiLineComment } + l.emit(Operator) + return root +} +func singleLineComment(l *lexer) stateFn { + for { + r := l.next() + if r == eof || r == '\n' { + break + } + } + l.ignore() + return root +} + +func multiLineComment(l *lexer) stateFn { + for { + r := l.next() + if r == eof { + return l.error("unclosed comment") + } + if r == '*' && l.accept("/") { + break + } + } + l.ignore() return root } diff --git a/vendor/github.com/antonmedv/expr/parser/parser.go b/vendor/github.com/antonmedv/expr/parser/parser.go index 821de9d35c..fd26fe18bd 100644 --- a/vendor/github.com/antonmedv/expr/parser/parser.go +++ b/vendor/github.com/antonmedv/expr/parser/parser.go @@ -2,7 +2,6 @@ package parser import ( "fmt" - "regexp" "strconv" "strings" "unicode/utf8" @@ -31,8 +30,8 @@ type builtin struct { var unaryOperators = map[string]operator{ "not": {50, left}, "!": {50, left}, - "-": {500, left}, - "+": {500, left}, + "-": {90, left}, + "+": {90, left}, } var binaryOperators = map[string]operator{ @@ -46,7 +45,6 @@ var binaryOperators = map[string]operator{ ">": {20, left}, ">=": {20, left}, "<=": {20, left}, - "not in": {20, left}, "in": {20, left}, "matches": {20, left}, "contains": {20, left}, @@ -58,11 +56,12 @@ var binaryOperators = map[string]operator{ "*": {60, left}, "/": {60, left}, "%": {60, left}, - "**": {70, right}, + "**": {100, right}, + "^": {100, right}, + "??": {500, left}, } var builtins = map[string]builtin{ - "len": {1}, "all": {2}, "none": {2}, "any": {2}, @@ -115,9 +114,13 @@ func Parse(input string) (*Tree, error) { } func (p *parser) error(format string, args ...interface{}) { + p.errorAt(p.current, format, args...) +} + +func (p *parser) errorAt(token Token, format string, args ...interface{}) { if p.err == nil { // show first error p.err = &file.Error{ - Location: p.current.Location, + Location: token.Location, Message: fmt.Sprintf(format, args...), } } @@ -145,12 +148,28 @@ func (p *parser) expect(kind Kind, values ...string) { func (p *parser) parseExpression(precedence int) Node { nodeLeft := p.parsePrimary() - token := p.current - for token.Is(Operator) && p.err == nil { - if op, ok := binaryOperators[token.Value]; ok { + lastOperator := "" + opToken := p.current + for opToken.Is(Operator) && p.err == nil { + negate := false + var notToken Token + + if opToken.Is(Operator, "not") { + p.next() + notToken = p.current + negate = true + opToken = p.current + } + + if op, ok := binaryOperators[opToken.Value]; ok { if op.precedence >= precedence { p.next() + if lastOperator == "??" && opToken.Value != "??" && !opToken.Is(Bracket, "(") { + p.errorAt(opToken, "Operator (%v) and coalesce expressions (??) cannot be mixed. 
Wrap either by parentheses.", opToken.Value) + break + } + var nodeRight Node if op.associativity == left { nodeRight = p.parseExpression(op.precedence + 1) @@ -158,31 +177,23 @@ func (p *parser) parseExpression(precedence int) Node { nodeRight = p.parseExpression(op.precedence) } - if token.Is(Operator, "matches") { - var r *regexp.Regexp - var err error + nodeLeft = &BinaryNode{ + Operator: opToken.Value, + Left: nodeLeft, + Right: nodeRight, + } + nodeLeft.SetLocation(opToken.Location) - if s, ok := nodeRight.(*StringNode); ok { - r, err = regexp.Compile(s.Value) - if err != nil { - p.error("%v", err) - } - } - nodeLeft = &MatchesNode{ - Regexp: r, - Left: nodeLeft, - Right: nodeRight, - } - nodeLeft.SetLocation(token.Location) - } else { - nodeLeft = &BinaryNode{ - Operator: token.Value, - Left: nodeLeft, - Right: nodeRight, + if negate { + nodeLeft = &UnaryNode{ + Operator: "not", + Node: nodeLeft, } - nodeLeft.SetLocation(token.Location) + nodeLeft.SetLocation(notToken.Location) } - token = p.current + + lastOperator = opToken.Value + opToken = p.current continue } } @@ -283,26 +294,26 @@ func (p *parser) parsePrimaryExpression() Node { node.SetLocation(token.Location) return node default: - node = p.parseIdentifierExpression(token, p.current) + node = p.parseIdentifierExpression(token) } case Number: p.next() value := strings.Replace(token.Value, "_", "", -1) - if strings.ContainsAny(value, ".eE") { - number, err := strconv.ParseFloat(value, 64) + if strings.Contains(value, "x") { + number, err := strconv.ParseInt(value, 0, 64) if err != nil { - p.error("invalid float literal: %v", err) + p.error("invalid hex literal: %v", err) } - node := &FloatNode{Value: number} + node := &IntegerNode{Value: int(number)} node.SetLocation(token.Location) return node - } else if strings.Contains(value, "x") { - number, err := strconv.ParseInt(value, 0, 64) + } else if strings.ContainsAny(value, ".eE") { + number, err := strconv.ParseFloat(value, 64) if err != nil { - p.error("invalid hex literal: %v", err) + p.error("invalid float literal: %v", err) } - node := &IntegerNode{Value: int(number)} + node := &FloatNode{Value: number} node.SetLocation(token.Location) return node } else { @@ -334,7 +345,7 @@ func (p *parser) parsePrimaryExpression() Node { return p.parsePostfixExpression(node) } -func (p *parser) parseIdentifierExpression(token, next Token) Node { +func (p *parser) parseIdentifierExpression(token Token) Node { var node Node if p.current.Is(Bracket, "(") { var arguments []Node @@ -359,37 +370,40 @@ func (p *parser) parseIdentifierExpression(token, next Token) Node { } node.SetLocation(token.Location) } else { - arguments = p.parseArguments() - node = &FunctionNode{ - Name: token.Value, - Arguments: arguments, + callee := &IdentifierNode{Value: token.Value} + callee.SetLocation(token.Location) + node = &CallNode{ + Callee: callee, + Arguments: p.parseArguments(), } node.SetLocation(token.Location) } } else { - var nilsafe bool - if next.Value == "?." 
{ - nilsafe = true - } - node = &IdentifierNode{Value: token.Value, NilSafe: nilsafe} + node = &IdentifierNode{Value: token.Value} node.SetLocation(token.Location) } return node } func (p *parser) parseClosure() Node { - token := p.current - p.expect(Bracket, "{") + startToken := p.current + expectClosingBracket := false + if p.current.Is(Bracket, "{") { + p.next() + expectClosingBracket = true + } p.depth++ node := p.parseExpression(0) p.depth-- - p.expect(Bracket, "}") + if expectClosingBracket { + p.expect(Bracket, "}") + } closure := &ClosureNode{ Node: node, } - closure.SetLocation(token.Location) + closure.SetLocation(startToken.Location) return closure } @@ -431,11 +445,11 @@ func (p *parser) parseMapExpression(token Token) Node { } var key Node - // a map key can be: - // * a number - // * a string - // * a identifier, which is equivalent to a string - // * an expression, which must be enclosed in parentheses -- (1 + 2) + // Map key can be one of: + // * number + // * string + // * identifier, which is equivalent to a string + // * expression, which must be enclosed in parentheses -- (1 + 2) if p.current.Is(Number) || p.current.Is(String) || p.current.Is(Identifier) { key = &StringNode{Value: p.current.Value} key.SetLocation(token.Location) @@ -463,43 +477,52 @@ end: } func (p *parser) parsePostfixExpression(node Node) Node { - token := p.current - var nilsafe bool - for (token.Is(Operator) || token.Is(Bracket)) && p.err == nil { - if token.Value == "." || token.Value == "?." { - if token.Value == "?." { - nilsafe = true - } + postfixToken := p.current + for (postfixToken.Is(Operator) || postfixToken.Is(Bracket)) && p.err == nil { + if postfixToken.Value == "." || postfixToken.Value == "?." { p.next() - token = p.current + propertyToken := p.current p.next() - if token.Kind != Identifier && + if propertyToken.Kind != Identifier && // Operators like "not" and "matches" are valid methods or property names. - (token.Kind != Operator || !isValidIdentifier(token.Value)) { + (propertyToken.Kind != Operator || !isValidIdentifier(propertyToken.Value)) { p.error("expected name") } + property := &StringNode{Value: propertyToken.Value} + property.SetLocation(propertyToken.Location) + + chainNode, isChain := node.(*ChainNode) + optional := postfixToken.Value == "?." 
+ + if isChain { + node = chainNode.Node + } + + memberNode := &MemberNode{ + Node: node, + Property: property, + Optional: optional, + } + memberNode.SetLocation(propertyToken.Location) + if p.current.Is(Bracket, "(") { - arguments := p.parseArguments() - node = &MethodNode{ - Node: node, - Method: token.Value, - Arguments: arguments, - NilSafe: nilsafe, + node = &CallNode{ + Callee: memberNode, + Arguments: p.parseArguments(), } - node.SetLocation(token.Location) + node.SetLocation(propertyToken.Location) } else { - node = &PropertyNode{ - Node: node, - Property: token.Value, - NilSafe: nilsafe, - } - node.SetLocation(token.Location) + node = memberNode + } + + if isChain || optional { + node = &ChainNode{Node: node} } - } else if token.Value == "[" { + } else if postfixToken.Value == "[" { p.next() var from, to Node @@ -514,7 +537,7 @@ func (p *parser) parsePostfixExpression(node Node) Node { Node: node, To: to, } - node.SetLocation(token.Location) + node.SetLocation(postfixToken.Location) p.expect(Bracket, "]") } else { @@ -533,25 +556,24 @@ func (p *parser) parsePostfixExpression(node Node) Node { From: from, To: to, } - node.SetLocation(token.Location) + node.SetLocation(postfixToken.Location) p.expect(Bracket, "]") } else { - // Slice operator [:] was not found, it should by just index node. - - node = &IndexNode{ - Node: node, - Index: from, + // Slice operator [:] was not found, + // it should be just an index node. + node = &MemberNode{ + Node: node, + Property: from, } - node.SetLocation(token.Location) + node.SetLocation(postfixToken.Location) p.expect(Bracket, "]") } } } else { break } - - token = p.current + postfixToken = p.current } return node } diff --git a/vendor/github.com/antonmedv/expr/vm/generated.go b/vendor/github.com/antonmedv/expr/vm/generated.go new file mode 100644 index 0000000000..9fc7883e2d --- /dev/null +++ b/vendor/github.com/antonmedv/expr/vm/generated.go @@ -0,0 +1,262 @@ +// Code generated by vm/func_types/main.go. DO NOT EDIT. 
+ +package vm + +import ( + "fmt" + "time" +) + +var FuncTypes = []interface{}{ + 1: new(func() time.Duration), + 2: new(func() time.Month), + 3: new(func() time.Time), + 4: new(func() time.Weekday), + 5: new(func() []uint8), + 6: new(func() []interface{}), + 7: new(func() bool), + 8: new(func() uint8), + 9: new(func() float64), + 10: new(func() int), + 11: new(func() int64), + 12: new(func() interface{}), + 13: new(func() map[string]interface{}), + 14: new(func() int32), + 15: new(func() string), + 16: new(func() uint), + 17: new(func() uint64), + 18: new(func(time.Duration) time.Duration), + 19: new(func(time.Duration) time.Time), + 20: new(func(time.Time) time.Duration), + 21: new(func(time.Time) bool), + 22: new(func([]interface{}, string) string), + 23: new(func([]string, string) string), + 24: new(func(bool) bool), + 25: new(func(bool) float64), + 26: new(func(bool) int), + 27: new(func(bool) string), + 28: new(func(float64) bool), + 29: new(func(float64) float64), + 30: new(func(float64) int), + 31: new(func(float64) string), + 32: new(func(int) bool), + 33: new(func(int) float64), + 34: new(func(int) int), + 35: new(func(int) string), + 36: new(func(int, int) int), + 37: new(func(int, int) string), + 38: new(func(int64) time.Time), + 39: new(func(string) []string), + 40: new(func(string) bool), + 41: new(func(string) float64), + 42: new(func(string) int), + 43: new(func(string) string), + 44: new(func(string, uint8) int), + 45: new(func(string, int) int), + 46: new(func(string, int32) int), + 47: new(func(string, string) bool), + 48: new(func(string, string) string), + 49: new(func(interface{}) bool), + 50: new(func(interface{}) float64), + 51: new(func(interface{}) int), + 52: new(func(interface{}) string), + 53: new(func(interface{}) interface{}), + 54: new(func(interface{}) []interface{}), + 55: new(func(interface{}) map[string]interface{}), + 56: new(func([]interface{}) interface{}), + 57: new(func([]interface{}) []interface{}), + 58: new(func([]interface{}) map[string]interface{}), + 59: new(func(interface{}, interface{}) bool), + 60: new(func(interface{}, interface{}) string), + 61: new(func(interface{}, interface{}) interface{}), + 62: new(func(interface{}, interface{}) []interface{}), +} + +func (vm *VM) call(fn interface{}, kind int) interface{} { + switch kind { + case 1: + return fn.(func() time.Duration)() + case 2: + return fn.(func() time.Month)() + case 3: + return fn.(func() time.Time)() + case 4: + return fn.(func() time.Weekday)() + case 5: + return fn.(func() []uint8)() + case 6: + return fn.(func() []interface{})() + case 7: + return fn.(func() bool)() + case 8: + return fn.(func() uint8)() + case 9: + return fn.(func() float64)() + case 10: + return fn.(func() int)() + case 11: + return fn.(func() int64)() + case 12: + return fn.(func() interface{})() + case 13: + return fn.(func() map[string]interface{})() + case 14: + return fn.(func() int32)() + case 15: + return fn.(func() string)() + case 16: + return fn.(func() uint)() + case 17: + return fn.(func() uint64)() + case 18: + arg1 := vm.pop().(time.Duration) + return fn.(func(time.Duration) time.Duration)(arg1) + case 19: + arg1 := vm.pop().(time.Duration) + return fn.(func(time.Duration) time.Time)(arg1) + case 20: + arg1 := vm.pop().(time.Time) + return fn.(func(time.Time) time.Duration)(arg1) + case 21: + arg1 := vm.pop().(time.Time) + return fn.(func(time.Time) bool)(arg1) + case 22: + arg2 := vm.pop().(string) + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}, string) string)(arg1, arg2) 
+ case 23: + arg2 := vm.pop().(string) + arg1 := vm.pop().([]string) + return fn.(func([]string, string) string)(arg1, arg2) + case 24: + arg1 := vm.pop().(bool) + return fn.(func(bool) bool)(arg1) + case 25: + arg1 := vm.pop().(bool) + return fn.(func(bool) float64)(arg1) + case 26: + arg1 := vm.pop().(bool) + return fn.(func(bool) int)(arg1) + case 27: + arg1 := vm.pop().(bool) + return fn.(func(bool) string)(arg1) + case 28: + arg1 := vm.pop().(float64) + return fn.(func(float64) bool)(arg1) + case 29: + arg1 := vm.pop().(float64) + return fn.(func(float64) float64)(arg1) + case 30: + arg1 := vm.pop().(float64) + return fn.(func(float64) int)(arg1) + case 31: + arg1 := vm.pop().(float64) + return fn.(func(float64) string)(arg1) + case 32: + arg1 := vm.pop().(int) + return fn.(func(int) bool)(arg1) + case 33: + arg1 := vm.pop().(int) + return fn.(func(int) float64)(arg1) + case 34: + arg1 := vm.pop().(int) + return fn.(func(int) int)(arg1) + case 35: + arg1 := vm.pop().(int) + return fn.(func(int) string)(arg1) + case 36: + arg2 := vm.pop().(int) + arg1 := vm.pop().(int) + return fn.(func(int, int) int)(arg1, arg2) + case 37: + arg2 := vm.pop().(int) + arg1 := vm.pop().(int) + return fn.(func(int, int) string)(arg1, arg2) + case 38: + arg1 := vm.pop().(int64) + return fn.(func(int64) time.Time)(arg1) + case 39: + arg1 := vm.pop().(string) + return fn.(func(string) []string)(arg1) + case 40: + arg1 := vm.pop().(string) + return fn.(func(string) bool)(arg1) + case 41: + arg1 := vm.pop().(string) + return fn.(func(string) float64)(arg1) + case 42: + arg1 := vm.pop().(string) + return fn.(func(string) int)(arg1) + case 43: + arg1 := vm.pop().(string) + return fn.(func(string) string)(arg1) + case 44: + arg2 := vm.pop().(uint8) + arg1 := vm.pop().(string) + return fn.(func(string, uint8) int)(arg1, arg2) + case 45: + arg2 := vm.pop().(int) + arg1 := vm.pop().(string) + return fn.(func(string, int) int)(arg1, arg2) + case 46: + arg2 := vm.pop().(int32) + arg1 := vm.pop().(string) + return fn.(func(string, int32) int)(arg1, arg2) + case 47: + arg2 := vm.pop().(string) + arg1 := vm.pop().(string) + return fn.(func(string, string) bool)(arg1, arg2) + case 48: + arg2 := vm.pop().(string) + arg1 := vm.pop().(string) + return fn.(func(string, string) string)(arg1, arg2) + case 49: + arg1 := vm.pop() + return fn.(func(interface{}) bool)(arg1) + case 50: + arg1 := vm.pop() + return fn.(func(interface{}) float64)(arg1) + case 51: + arg1 := vm.pop() + return fn.(func(interface{}) int)(arg1) + case 52: + arg1 := vm.pop() + return fn.(func(interface{}) string)(arg1) + case 53: + arg1 := vm.pop() + return fn.(func(interface{}) interface{})(arg1) + case 54: + arg1 := vm.pop() + return fn.(func(interface{}) []interface{})(arg1) + case 55: + arg1 := vm.pop() + return fn.(func(interface{}) map[string]interface{})(arg1) + case 56: + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}) interface{})(arg1) + case 57: + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}) []interface{})(arg1) + case 58: + arg1 := vm.pop().([]interface{}) + return fn.(func([]interface{}) map[string]interface{})(arg1) + case 59: + arg2 := vm.pop() + arg1 := vm.pop() + return fn.(func(interface{}, interface{}) bool)(arg1, arg2) + case 60: + arg2 := vm.pop() + arg1 := vm.pop() + return fn.(func(interface{}, interface{}) string)(arg1, arg2) + case 61: + arg2 := vm.pop() + arg1 := vm.pop() + return fn.(func(interface{}, interface{}) interface{})(arg1, arg2) + case 62: + arg2 := vm.pop() + arg1 := vm.pop() + 
return fn.(func(interface{}, interface{}) []interface{})(arg1, arg2) + + } + panic(fmt.Sprintf("unknown function kind (%v)", kind)) +} diff --git a/vendor/github.com/antonmedv/expr/vm/helpers.go b/vendor/github.com/antonmedv/expr/vm/helpers.go deleted file mode 100644 index 775b0e759f..0000000000 --- a/vendor/github.com/antonmedv/expr/vm/helpers.go +++ /dev/null @@ -1,3247 +0,0 @@ -// Code generated by vm/generate/main.go. DO NOT EDIT. - -package vm - -import ( - "fmt" - "reflect" -) - -func equal(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x == y - case uint8: - return uint8(x) == y - case uint16: - return uint16(x) == y - case uint32: - return uint32(x) == y - case uint64: - return uint64(x) == y - case int: - return int(x) == y - case int8: - return int8(x) == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case uint8: - switch y := b.(type) { - case uint: - return x == uint8(y) - case uint8: - return x == y - case uint16: - return uint16(x) == y - case uint32: - return uint32(x) == y - case uint64: - return uint64(x) == y - case int: - return int(x) == y - case int8: - return int8(x) == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case uint16: - switch y := b.(type) { - case uint: - return x == uint16(y) - case uint8: - return x == uint16(y) - case uint16: - return x == y - case uint32: - return uint32(x) == y - case uint64: - return uint64(x) == y - case int: - return int(x) == y - case int8: - return int8(x) == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case uint32: - switch y := b.(type) { - case uint: - return x == uint32(y) - case uint8: - return x == uint32(y) - case uint16: - return x == uint32(y) - case uint32: - return x == y - case uint64: - return uint64(x) == y - case int: - return int(x) == y - case int8: - return int8(x) == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case uint64: - switch y := b.(type) { - case uint: - return x == uint64(y) - case uint8: - return x == uint64(y) - case uint16: - return x == uint64(y) - case uint32: - return x == uint64(y) - case uint64: - return x == y - case int: - return int(x) == y - case int8: - return int8(x) == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case int: - switch y := b.(type) { - case uint: - return x == int(y) - case uint8: - return x == int(y) - case uint16: - return x == int(y) - case uint32: - return x == int(y) - case uint64: - return x == int(y) - case int: - return x == y - case int8: - return int8(x) == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case int8: - switch y := b.(type) { - case uint: - return x == int8(y) - case 
uint8: - return x == int8(y) - case uint16: - return x == int8(y) - case uint32: - return x == int8(y) - case uint64: - return x == int8(y) - case int: - return x == int8(y) - case int8: - return x == y - case int16: - return int16(x) == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case int16: - switch y := b.(type) { - case uint: - return x == int16(y) - case uint8: - return x == int16(y) - case uint16: - return x == int16(y) - case uint32: - return x == int16(y) - case uint64: - return x == int16(y) - case int: - return x == int16(y) - case int8: - return x == int16(y) - case int16: - return x == y - case int32: - return int32(x) == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case int32: - switch y := b.(type) { - case uint: - return x == int32(y) - case uint8: - return x == int32(y) - case uint16: - return x == int32(y) - case uint32: - return x == int32(y) - case uint64: - return x == int32(y) - case int: - return x == int32(y) - case int8: - return x == int32(y) - case int16: - return x == int32(y) - case int32: - return x == y - case int64: - return int64(x) == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case int64: - switch y := b.(type) { - case uint: - return x == int64(y) - case uint8: - return x == int64(y) - case uint16: - return x == int64(y) - case uint32: - return x == int64(y) - case uint64: - return x == int64(y) - case int: - return x == int64(y) - case int8: - return x == int64(y) - case int16: - return x == int64(y) - case int32: - return x == int64(y) - case int64: - return x == y - case float32: - return float32(x) == y - case float64: - return float64(x) == y - } - case float32: - switch y := b.(type) { - case uint: - return x == float32(y) - case uint8: - return x == float32(y) - case uint16: - return x == float32(y) - case uint32: - return x == float32(y) - case uint64: - return x == float32(y) - case int: - return x == float32(y) - case int8: - return x == float32(y) - case int16: - return x == float32(y) - case int32: - return x == float32(y) - case int64: - return x == float32(y) - case float32: - return x == y - case float64: - return float64(x) == y - } - case float64: - switch y := b.(type) { - case uint: - return x == float64(y) - case uint8: - return x == float64(y) - case uint16: - return x == float64(y) - case uint32: - return x == float64(y) - case uint64: - return x == float64(y) - case int: - return x == float64(y) - case int8: - return x == float64(y) - case int16: - return x == float64(y) - case int32: - return x == float64(y) - case int64: - return x == float64(y) - case float32: - return x == float64(y) - case float64: - return x == y - } - case string: - switch y := b.(type) { - case string: - return x == y - } - } - if isNil(a) && isNil(b) { - return true - } - return reflect.DeepEqual(a, b) -} - -func less(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x < y - case uint8: - return uint8(x) < y - case uint16: - return uint16(x) < y - case uint32: - return uint32(x) < y - case uint64: - return uint64(x) < y - case int: - return int(x) < y - case int8: - return int8(x) < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - 
return float64(x) < y - } - case uint8: - switch y := b.(type) { - case uint: - return x < uint8(y) - case uint8: - return x < y - case uint16: - return uint16(x) < y - case uint32: - return uint32(x) < y - case uint64: - return uint64(x) < y - case int: - return int(x) < y - case int8: - return int8(x) < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case uint16: - switch y := b.(type) { - case uint: - return x < uint16(y) - case uint8: - return x < uint16(y) - case uint16: - return x < y - case uint32: - return uint32(x) < y - case uint64: - return uint64(x) < y - case int: - return int(x) < y - case int8: - return int8(x) < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case uint32: - switch y := b.(type) { - case uint: - return x < uint32(y) - case uint8: - return x < uint32(y) - case uint16: - return x < uint32(y) - case uint32: - return x < y - case uint64: - return uint64(x) < y - case int: - return int(x) < y - case int8: - return int8(x) < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case uint64: - switch y := b.(type) { - case uint: - return x < uint64(y) - case uint8: - return x < uint64(y) - case uint16: - return x < uint64(y) - case uint32: - return x < uint64(y) - case uint64: - return x < y - case int: - return int(x) < y - case int8: - return int8(x) < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case int: - switch y := b.(type) { - case uint: - return x < int(y) - case uint8: - return x < int(y) - case uint16: - return x < int(y) - case uint32: - return x < int(y) - case uint64: - return x < int(y) - case int: - return x < y - case int8: - return int8(x) < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case int8: - switch y := b.(type) { - case uint: - return x < int8(y) - case uint8: - return x < int8(y) - case uint16: - return x < int8(y) - case uint32: - return x < int8(y) - case uint64: - return x < int8(y) - case int: - return x < int8(y) - case int8: - return x < y - case int16: - return int16(x) < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case int16: - switch y := b.(type) { - case uint: - return x < int16(y) - case uint8: - return x < int16(y) - case uint16: - return x < int16(y) - case uint32: - return x < int16(y) - case uint64: - return x < int16(y) - case int: - return x < int16(y) - case int8: - return x < int16(y) - case int16: - return x < y - case int32: - return int32(x) < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case int32: - switch y := b.(type) { - case uint: - return x < int32(y) - case uint8: - return x < int32(y) - case uint16: - return x < int32(y) - case uint32: - return x < int32(y) - case uint64: - return x < 
int32(y) - case int: - return x < int32(y) - case int8: - return x < int32(y) - case int16: - return x < int32(y) - case int32: - return x < y - case int64: - return int64(x) < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case int64: - switch y := b.(type) { - case uint: - return x < int64(y) - case uint8: - return x < int64(y) - case uint16: - return x < int64(y) - case uint32: - return x < int64(y) - case uint64: - return x < int64(y) - case int: - return x < int64(y) - case int8: - return x < int64(y) - case int16: - return x < int64(y) - case int32: - return x < int64(y) - case int64: - return x < y - case float32: - return float32(x) < y - case float64: - return float64(x) < y - } - case float32: - switch y := b.(type) { - case uint: - return x < float32(y) - case uint8: - return x < float32(y) - case uint16: - return x < float32(y) - case uint32: - return x < float32(y) - case uint64: - return x < float32(y) - case int: - return x < float32(y) - case int8: - return x < float32(y) - case int16: - return x < float32(y) - case int32: - return x < float32(y) - case int64: - return x < float32(y) - case float32: - return x < y - case float64: - return float64(x) < y - } - case float64: - switch y := b.(type) { - case uint: - return x < float64(y) - case uint8: - return x < float64(y) - case uint16: - return x < float64(y) - case uint32: - return x < float64(y) - case uint64: - return x < float64(y) - case int: - return x < float64(y) - case int8: - return x < float64(y) - case int16: - return x < float64(y) - case int32: - return x < float64(y) - case int64: - return x < float64(y) - case float32: - return x < float64(y) - case float64: - return x < y - } - case string: - switch y := b.(type) { - case string: - return x < y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "<", b)) -} - -func more(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x > y - case uint8: - return uint8(x) > y - case uint16: - return uint16(x) > y - case uint32: - return uint32(x) > y - case uint64: - return uint64(x) > y - case int: - return int(x) > y - case int8: - return int8(x) > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case uint8: - switch y := b.(type) { - case uint: - return x > uint8(y) - case uint8: - return x > y - case uint16: - return uint16(x) > y - case uint32: - return uint32(x) > y - case uint64: - return uint64(x) > y - case int: - return int(x) > y - case int8: - return int8(x) > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case uint16: - switch y := b.(type) { - case uint: - return x > uint16(y) - case uint8: - return x > uint16(y) - case uint16: - return x > y - case uint32: - return uint32(x) > y - case uint64: - return uint64(x) > y - case int: - return int(x) > y - case int8: - return int8(x) > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case uint32: - switch y := b.(type) { - case uint: - return x > uint32(y) - case uint8: - return x > uint32(y) - case uint16: - return x > uint32(y) - case uint32: - return x > y - 
case uint64: - return uint64(x) > y - case int: - return int(x) > y - case int8: - return int8(x) > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case uint64: - switch y := b.(type) { - case uint: - return x > uint64(y) - case uint8: - return x > uint64(y) - case uint16: - return x > uint64(y) - case uint32: - return x > uint64(y) - case uint64: - return x > y - case int: - return int(x) > y - case int8: - return int8(x) > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case int: - switch y := b.(type) { - case uint: - return x > int(y) - case uint8: - return x > int(y) - case uint16: - return x > int(y) - case uint32: - return x > int(y) - case uint64: - return x > int(y) - case int: - return x > y - case int8: - return int8(x) > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case int8: - switch y := b.(type) { - case uint: - return x > int8(y) - case uint8: - return x > int8(y) - case uint16: - return x > int8(y) - case uint32: - return x > int8(y) - case uint64: - return x > int8(y) - case int: - return x > int8(y) - case int8: - return x > y - case int16: - return int16(x) > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case int16: - switch y := b.(type) { - case uint: - return x > int16(y) - case uint8: - return x > int16(y) - case uint16: - return x > int16(y) - case uint32: - return x > int16(y) - case uint64: - return x > int16(y) - case int: - return x > int16(y) - case int8: - return x > int16(y) - case int16: - return x > y - case int32: - return int32(x) > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case int32: - switch y := b.(type) { - case uint: - return x > int32(y) - case uint8: - return x > int32(y) - case uint16: - return x > int32(y) - case uint32: - return x > int32(y) - case uint64: - return x > int32(y) - case int: - return x > int32(y) - case int8: - return x > int32(y) - case int16: - return x > int32(y) - case int32: - return x > y - case int64: - return int64(x) > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case int64: - switch y := b.(type) { - case uint: - return x > int64(y) - case uint8: - return x > int64(y) - case uint16: - return x > int64(y) - case uint32: - return x > int64(y) - case uint64: - return x > int64(y) - case int: - return x > int64(y) - case int8: - return x > int64(y) - case int16: - return x > int64(y) - case int32: - return x > int64(y) - case int64: - return x > y - case float32: - return float32(x) > y - case float64: - return float64(x) > y - } - case float32: - switch y := b.(type) { - case uint: - return x > float32(y) - case uint8: - return x > float32(y) - case uint16: - return x > float32(y) - case uint32: - return x > float32(y) - case uint64: - return x > float32(y) - case int: - return x > float32(y) - case int8: - return x > float32(y) - case int16: - return x > float32(y) - case int32: - return x > float32(y) - case int64: - return x > float32(y) - case 
float32: - return x > y - case float64: - return float64(x) > y - } - case float64: - switch y := b.(type) { - case uint: - return x > float64(y) - case uint8: - return x > float64(y) - case uint16: - return x > float64(y) - case uint32: - return x > float64(y) - case uint64: - return x > float64(y) - case int: - return x > float64(y) - case int8: - return x > float64(y) - case int16: - return x > float64(y) - case int32: - return x > float64(y) - case int64: - return x > float64(y) - case float32: - return x > float64(y) - case float64: - return x > y - } - case string: - switch y := b.(type) { - case string: - return x > y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, ">", b)) -} - -func lessOrEqual(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x <= y - case uint8: - return uint8(x) <= y - case uint16: - return uint16(x) <= y - case uint32: - return uint32(x) <= y - case uint64: - return uint64(x) <= y - case int: - return int(x) <= y - case int8: - return int8(x) <= y - case int16: - return int16(x) <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case uint8: - switch y := b.(type) { - case uint: - return x <= uint8(y) - case uint8: - return x <= y - case uint16: - return uint16(x) <= y - case uint32: - return uint32(x) <= y - case uint64: - return uint64(x) <= y - case int: - return int(x) <= y - case int8: - return int8(x) <= y - case int16: - return int16(x) <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case uint16: - switch y := b.(type) { - case uint: - return x <= uint16(y) - case uint8: - return x <= uint16(y) - case uint16: - return x <= y - case uint32: - return uint32(x) <= y - case uint64: - return uint64(x) <= y - case int: - return int(x) <= y - case int8: - return int8(x) <= y - case int16: - return int16(x) <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case uint32: - switch y := b.(type) { - case uint: - return x <= uint32(y) - case uint8: - return x <= uint32(y) - case uint16: - return x <= uint32(y) - case uint32: - return x <= y - case uint64: - return uint64(x) <= y - case int: - return int(x) <= y - case int8: - return int8(x) <= y - case int16: - return int16(x) <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case uint64: - switch y := b.(type) { - case uint: - return x <= uint64(y) - case uint8: - return x <= uint64(y) - case uint16: - return x <= uint64(y) - case uint32: - return x <= uint64(y) - case uint64: - return x <= y - case int: - return int(x) <= y - case int8: - return int8(x) <= y - case int16: - return int16(x) <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case int: - switch y := b.(type) { - case uint: - return x <= int(y) - case uint8: - return x <= int(y) - case uint16: - return x <= int(y) - case uint32: - return x <= int(y) - case uint64: - return x <= int(y) - case int: - return x <= y - case int8: - return int8(x) <= y - case int16: - return int16(x) <= y - case int32: - 
return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case int8: - switch y := b.(type) { - case uint: - return x <= int8(y) - case uint8: - return x <= int8(y) - case uint16: - return x <= int8(y) - case uint32: - return x <= int8(y) - case uint64: - return x <= int8(y) - case int: - return x <= int8(y) - case int8: - return x <= y - case int16: - return int16(x) <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case int16: - switch y := b.(type) { - case uint: - return x <= int16(y) - case uint8: - return x <= int16(y) - case uint16: - return x <= int16(y) - case uint32: - return x <= int16(y) - case uint64: - return x <= int16(y) - case int: - return x <= int16(y) - case int8: - return x <= int16(y) - case int16: - return x <= y - case int32: - return int32(x) <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case int32: - switch y := b.(type) { - case uint: - return x <= int32(y) - case uint8: - return x <= int32(y) - case uint16: - return x <= int32(y) - case uint32: - return x <= int32(y) - case uint64: - return x <= int32(y) - case int: - return x <= int32(y) - case int8: - return x <= int32(y) - case int16: - return x <= int32(y) - case int32: - return x <= y - case int64: - return int64(x) <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case int64: - switch y := b.(type) { - case uint: - return x <= int64(y) - case uint8: - return x <= int64(y) - case uint16: - return x <= int64(y) - case uint32: - return x <= int64(y) - case uint64: - return x <= int64(y) - case int: - return x <= int64(y) - case int8: - return x <= int64(y) - case int16: - return x <= int64(y) - case int32: - return x <= int64(y) - case int64: - return x <= y - case float32: - return float32(x) <= y - case float64: - return float64(x) <= y - } - case float32: - switch y := b.(type) { - case uint: - return x <= float32(y) - case uint8: - return x <= float32(y) - case uint16: - return x <= float32(y) - case uint32: - return x <= float32(y) - case uint64: - return x <= float32(y) - case int: - return x <= float32(y) - case int8: - return x <= float32(y) - case int16: - return x <= float32(y) - case int32: - return x <= float32(y) - case int64: - return x <= float32(y) - case float32: - return x <= y - case float64: - return float64(x) <= y - } - case float64: - switch y := b.(type) { - case uint: - return x <= float64(y) - case uint8: - return x <= float64(y) - case uint16: - return x <= float64(y) - case uint32: - return x <= float64(y) - case uint64: - return x <= float64(y) - case int: - return x <= float64(y) - case int8: - return x <= float64(y) - case int16: - return x <= float64(y) - case int32: - return x <= float64(y) - case int64: - return x <= float64(y) - case float32: - return x <= float64(y) - case float64: - return x <= y - } - case string: - switch y := b.(type) { - case string: - return x <= y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "<=", b)) -} - -func moreOrEqual(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x >= y - case uint8: - return uint8(x) >= y - case uint16: - return uint16(x) >= y - case uint32: - return uint32(x) >= y - case uint64: - return uint64(x) >= y - case int: 
- return int(x) >= y - case int8: - return int8(x) >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case uint8: - switch y := b.(type) { - case uint: - return x >= uint8(y) - case uint8: - return x >= y - case uint16: - return uint16(x) >= y - case uint32: - return uint32(x) >= y - case uint64: - return uint64(x) >= y - case int: - return int(x) >= y - case int8: - return int8(x) >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case uint16: - switch y := b.(type) { - case uint: - return x >= uint16(y) - case uint8: - return x >= uint16(y) - case uint16: - return x >= y - case uint32: - return uint32(x) >= y - case uint64: - return uint64(x) >= y - case int: - return int(x) >= y - case int8: - return int8(x) >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case uint32: - switch y := b.(type) { - case uint: - return x >= uint32(y) - case uint8: - return x >= uint32(y) - case uint16: - return x >= uint32(y) - case uint32: - return x >= y - case uint64: - return uint64(x) >= y - case int: - return int(x) >= y - case int8: - return int8(x) >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case uint64: - switch y := b.(type) { - case uint: - return x >= uint64(y) - case uint8: - return x >= uint64(y) - case uint16: - return x >= uint64(y) - case uint32: - return x >= uint64(y) - case uint64: - return x >= y - case int: - return int(x) >= y - case int8: - return int8(x) >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case int: - switch y := b.(type) { - case uint: - return x >= int(y) - case uint8: - return x >= int(y) - case uint16: - return x >= int(y) - case uint32: - return x >= int(y) - case uint64: - return x >= int(y) - case int: - return x >= y - case int8: - return int8(x) >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case int8: - switch y := b.(type) { - case uint: - return x >= int8(y) - case uint8: - return x >= int8(y) - case uint16: - return x >= int8(y) - case uint32: - return x >= int8(y) - case uint64: - return x >= int8(y) - case int: - return x >= int8(y) - case int8: - return x >= y - case int16: - return int16(x) >= y - case int32: - return int32(x) >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case int16: - switch y := b.(type) { - case uint: - return x >= int16(y) - case uint8: - return x >= int16(y) - case uint16: - return x >= int16(y) - case uint32: - return x >= int16(y) - case uint64: - return x >= int16(y) - case int: - return x >= int16(y) - case int8: - return x >= int16(y) - case int16: - return x >= y - case int32: - return int32(x) >= y - case int64: - return 
int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case int32: - switch y := b.(type) { - case uint: - return x >= int32(y) - case uint8: - return x >= int32(y) - case uint16: - return x >= int32(y) - case uint32: - return x >= int32(y) - case uint64: - return x >= int32(y) - case int: - return x >= int32(y) - case int8: - return x >= int32(y) - case int16: - return x >= int32(y) - case int32: - return x >= y - case int64: - return int64(x) >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case int64: - switch y := b.(type) { - case uint: - return x >= int64(y) - case uint8: - return x >= int64(y) - case uint16: - return x >= int64(y) - case uint32: - return x >= int64(y) - case uint64: - return x >= int64(y) - case int: - return x >= int64(y) - case int8: - return x >= int64(y) - case int16: - return x >= int64(y) - case int32: - return x >= int64(y) - case int64: - return x >= y - case float32: - return float32(x) >= y - case float64: - return float64(x) >= y - } - case float32: - switch y := b.(type) { - case uint: - return x >= float32(y) - case uint8: - return x >= float32(y) - case uint16: - return x >= float32(y) - case uint32: - return x >= float32(y) - case uint64: - return x >= float32(y) - case int: - return x >= float32(y) - case int8: - return x >= float32(y) - case int16: - return x >= float32(y) - case int32: - return x >= float32(y) - case int64: - return x >= float32(y) - case float32: - return x >= y - case float64: - return float64(x) >= y - } - case float64: - switch y := b.(type) { - case uint: - return x >= float64(y) - case uint8: - return x >= float64(y) - case uint16: - return x >= float64(y) - case uint32: - return x >= float64(y) - case uint64: - return x >= float64(y) - case int: - return x >= float64(y) - case int8: - return x >= float64(y) - case int16: - return x >= float64(y) - case int32: - return x >= float64(y) - case int64: - return x >= float64(y) - case float32: - return x >= float64(y) - case float64: - return x >= y - } - case string: - switch y := b.(type) { - case string: - return x >= y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, ">=", b)) -} - -func add(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x + y - case uint8: - return uint8(x) + y - case uint16: - return uint16(x) + y - case uint32: - return uint32(x) + y - case uint64: - return uint64(x) + y - case int: - return int(x) + y - case int8: - return int8(x) + y - case int16: - return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case uint8: - switch y := b.(type) { - case uint: - return x + uint8(y) - case uint8: - return x + y - case uint16: - return uint16(x) + y - case uint32: - return uint32(x) + y - case uint64: - return uint64(x) + y - case int: - return int(x) + y - case int8: - return int8(x) + y - case int16: - return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case uint16: - switch y := b.(type) { - case uint: - return x + uint16(y) - case uint8: - return x + uint16(y) - case uint16: - return x + y - case uint32: - return uint32(x) + y - case uint64: - return uint64(x) + y - case int: - return int(x) + y - case int8: - return int8(x) + y - case int16: - 
return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case uint32: - switch y := b.(type) { - case uint: - return x + uint32(y) - case uint8: - return x + uint32(y) - case uint16: - return x + uint32(y) - case uint32: - return x + y - case uint64: - return uint64(x) + y - case int: - return int(x) + y - case int8: - return int8(x) + y - case int16: - return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case uint64: - switch y := b.(type) { - case uint: - return x + uint64(y) - case uint8: - return x + uint64(y) - case uint16: - return x + uint64(y) - case uint32: - return x + uint64(y) - case uint64: - return x + y - case int: - return int(x) + y - case int8: - return int8(x) + y - case int16: - return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case int: - switch y := b.(type) { - case uint: - return x + int(y) - case uint8: - return x + int(y) - case uint16: - return x + int(y) - case uint32: - return x + int(y) - case uint64: - return x + int(y) - case int: - return x + y - case int8: - return int8(x) + y - case int16: - return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case int8: - switch y := b.(type) { - case uint: - return x + int8(y) - case uint8: - return x + int8(y) - case uint16: - return x + int8(y) - case uint32: - return x + int8(y) - case uint64: - return x + int8(y) - case int: - return x + int8(y) - case int8: - return x + y - case int16: - return int16(x) + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case int16: - switch y := b.(type) { - case uint: - return x + int16(y) - case uint8: - return x + int16(y) - case uint16: - return x + int16(y) - case uint32: - return x + int16(y) - case uint64: - return x + int16(y) - case int: - return x + int16(y) - case int8: - return x + int16(y) - case int16: - return x + y - case int32: - return int32(x) + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case int32: - switch y := b.(type) { - case uint: - return x + int32(y) - case uint8: - return x + int32(y) - case uint16: - return x + int32(y) - case uint32: - return x + int32(y) - case uint64: - return x + int32(y) - case int: - return x + int32(y) - case int8: - return x + int32(y) - case int16: - return x + int32(y) - case int32: - return x + y - case int64: - return int64(x) + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case int64: - switch y := b.(type) { - case uint: - return x + int64(y) - case uint8: - return x + int64(y) - case uint16: - return x + int64(y) - case uint32: - return x + int64(y) - case uint64: - return x + int64(y) - case int: - return x + int64(y) - case int8: - return x + int64(y) - case int16: - return x + int64(y) - case int32: - return x + int64(y) - case int64: - return x + y - case float32: - return float32(x) + y - case float64: - return float64(x) + y - } - case float32: - switch y := b.(type) { - case uint: - return x + 
float32(y) - case uint8: - return x + float32(y) - case uint16: - return x + float32(y) - case uint32: - return x + float32(y) - case uint64: - return x + float32(y) - case int: - return x + float32(y) - case int8: - return x + float32(y) - case int16: - return x + float32(y) - case int32: - return x + float32(y) - case int64: - return x + float32(y) - case float32: - return x + y - case float64: - return float64(x) + y - } - case float64: - switch y := b.(type) { - case uint: - return x + float64(y) - case uint8: - return x + float64(y) - case uint16: - return x + float64(y) - case uint32: - return x + float64(y) - case uint64: - return x + float64(y) - case int: - return x + float64(y) - case int8: - return x + float64(y) - case int16: - return x + float64(y) - case int32: - return x + float64(y) - case int64: - return x + float64(y) - case float32: - return x + float64(y) - case float64: - return x + y - } - case string: - switch y := b.(type) { - case string: - return x + y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "+", b)) -} - -func subtract(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x - y - case uint8: - return uint8(x) - y - case uint16: - return uint16(x) - y - case uint32: - return uint32(x) - y - case uint64: - return uint64(x) - y - case int: - return int(x) - y - case int8: - return int8(x) - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case uint8: - switch y := b.(type) { - case uint: - return x - uint8(y) - case uint8: - return x - y - case uint16: - return uint16(x) - y - case uint32: - return uint32(x) - y - case uint64: - return uint64(x) - y - case int: - return int(x) - y - case int8: - return int8(x) - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case uint16: - switch y := b.(type) { - case uint: - return x - uint16(y) - case uint8: - return x - uint16(y) - case uint16: - return x - y - case uint32: - return uint32(x) - y - case uint64: - return uint64(x) - y - case int: - return int(x) - y - case int8: - return int8(x) - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case uint32: - switch y := b.(type) { - case uint: - return x - uint32(y) - case uint8: - return x - uint32(y) - case uint16: - return x - uint32(y) - case uint32: - return x - y - case uint64: - return uint64(x) - y - case int: - return int(x) - y - case int8: - return int8(x) - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case uint64: - switch y := b.(type) { - case uint: - return x - uint64(y) - case uint8: - return x - uint64(y) - case uint16: - return x - uint64(y) - case uint32: - return x - uint64(y) - case uint64: - return x - y - case int: - return int(x) - y - case int8: - return int8(x) - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case int: - switch y := 
b.(type) { - case uint: - return x - int(y) - case uint8: - return x - int(y) - case uint16: - return x - int(y) - case uint32: - return x - int(y) - case uint64: - return x - int(y) - case int: - return x - y - case int8: - return int8(x) - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case int8: - switch y := b.(type) { - case uint: - return x - int8(y) - case uint8: - return x - int8(y) - case uint16: - return x - int8(y) - case uint32: - return x - int8(y) - case uint64: - return x - int8(y) - case int: - return x - int8(y) - case int8: - return x - y - case int16: - return int16(x) - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case int16: - switch y := b.(type) { - case uint: - return x - int16(y) - case uint8: - return x - int16(y) - case uint16: - return x - int16(y) - case uint32: - return x - int16(y) - case uint64: - return x - int16(y) - case int: - return x - int16(y) - case int8: - return x - int16(y) - case int16: - return x - y - case int32: - return int32(x) - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case int32: - switch y := b.(type) { - case uint: - return x - int32(y) - case uint8: - return x - int32(y) - case uint16: - return x - int32(y) - case uint32: - return x - int32(y) - case uint64: - return x - int32(y) - case int: - return x - int32(y) - case int8: - return x - int32(y) - case int16: - return x - int32(y) - case int32: - return x - y - case int64: - return int64(x) - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case int64: - switch y := b.(type) { - case uint: - return x - int64(y) - case uint8: - return x - int64(y) - case uint16: - return x - int64(y) - case uint32: - return x - int64(y) - case uint64: - return x - int64(y) - case int: - return x - int64(y) - case int8: - return x - int64(y) - case int16: - return x - int64(y) - case int32: - return x - int64(y) - case int64: - return x - y - case float32: - return float32(x) - y - case float64: - return float64(x) - y - } - case float32: - switch y := b.(type) { - case uint: - return x - float32(y) - case uint8: - return x - float32(y) - case uint16: - return x - float32(y) - case uint32: - return x - float32(y) - case uint64: - return x - float32(y) - case int: - return x - float32(y) - case int8: - return x - float32(y) - case int16: - return x - float32(y) - case int32: - return x - float32(y) - case int64: - return x - float32(y) - case float32: - return x - y - case float64: - return float64(x) - y - } - case float64: - switch y := b.(type) { - case uint: - return x - float64(y) - case uint8: - return x - float64(y) - case uint16: - return x - float64(y) - case uint32: - return x - float64(y) - case uint64: - return x - float64(y) - case int: - return x - float64(y) - case int8: - return x - float64(y) - case int16: - return x - float64(y) - case int32: - return x - float64(y) - case int64: - return x - float64(y) - case float32: - return x - float64(y) - case float64: - return x - y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "-", b)) -} - -func multiply(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x * y - case uint8: - return 
uint8(x) * y - case uint16: - return uint16(x) * y - case uint32: - return uint32(x) * y - case uint64: - return uint64(x) * y - case int: - return int(x) * y - case int8: - return int8(x) * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case uint8: - switch y := b.(type) { - case uint: - return x * uint8(y) - case uint8: - return x * y - case uint16: - return uint16(x) * y - case uint32: - return uint32(x) * y - case uint64: - return uint64(x) * y - case int: - return int(x) * y - case int8: - return int8(x) * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case uint16: - switch y := b.(type) { - case uint: - return x * uint16(y) - case uint8: - return x * uint16(y) - case uint16: - return x * y - case uint32: - return uint32(x) * y - case uint64: - return uint64(x) * y - case int: - return int(x) * y - case int8: - return int8(x) * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case uint32: - switch y := b.(type) { - case uint: - return x * uint32(y) - case uint8: - return x * uint32(y) - case uint16: - return x * uint32(y) - case uint32: - return x * y - case uint64: - return uint64(x) * y - case int: - return int(x) * y - case int8: - return int8(x) * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case uint64: - switch y := b.(type) { - case uint: - return x * uint64(y) - case uint8: - return x * uint64(y) - case uint16: - return x * uint64(y) - case uint32: - return x * uint64(y) - case uint64: - return x * y - case int: - return int(x) * y - case int8: - return int8(x) * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case int: - switch y := b.(type) { - case uint: - return x * int(y) - case uint8: - return x * int(y) - case uint16: - return x * int(y) - case uint32: - return x * int(y) - case uint64: - return x * int(y) - case int: - return x * y - case int8: - return int8(x) * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case int8: - switch y := b.(type) { - case uint: - return x * int8(y) - case uint8: - return x * int8(y) - case uint16: - return x * int8(y) - case uint32: - return x * int8(y) - case uint64: - return x * int8(y) - case int: - return x * int8(y) - case int8: - return x * y - case int16: - return int16(x) * y - case int32: - return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case int16: - switch y := b.(type) { - case uint: - return x * int16(y) - case uint8: - return x * int16(y) - case uint16: - return x * int16(y) - case uint32: - return x * int16(y) - case uint64: - return x * int16(y) - case int: - return x * int16(y) - case int8: - return x * int16(y) - case int16: - return x * y - case int32: 
- return int32(x) * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case int32: - switch y := b.(type) { - case uint: - return x * int32(y) - case uint8: - return x * int32(y) - case uint16: - return x * int32(y) - case uint32: - return x * int32(y) - case uint64: - return x * int32(y) - case int: - return x * int32(y) - case int8: - return x * int32(y) - case int16: - return x * int32(y) - case int32: - return x * y - case int64: - return int64(x) * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case int64: - switch y := b.(type) { - case uint: - return x * int64(y) - case uint8: - return x * int64(y) - case uint16: - return x * int64(y) - case uint32: - return x * int64(y) - case uint64: - return x * int64(y) - case int: - return x * int64(y) - case int8: - return x * int64(y) - case int16: - return x * int64(y) - case int32: - return x * int64(y) - case int64: - return x * y - case float32: - return float32(x) * y - case float64: - return float64(x) * y - } - case float32: - switch y := b.(type) { - case uint: - return x * float32(y) - case uint8: - return x * float32(y) - case uint16: - return x * float32(y) - case uint32: - return x * float32(y) - case uint64: - return x * float32(y) - case int: - return x * float32(y) - case int8: - return x * float32(y) - case int16: - return x * float32(y) - case int32: - return x * float32(y) - case int64: - return x * float32(y) - case float32: - return x * y - case float64: - return float64(x) * y - } - case float64: - switch y := b.(type) { - case uint: - return x * float64(y) - case uint8: - return x * float64(y) - case uint16: - return x * float64(y) - case uint32: - return x * float64(y) - case uint64: - return x * float64(y) - case int: - return x * float64(y) - case int8: - return x * float64(y) - case int16: - return x * float64(y) - case int32: - return x * float64(y) - case int64: - return x * float64(y) - case float32: - return x * float64(y) - case float64: - return x * y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "*", b)) -} - -func divide(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x / y - case uint8: - return uint8(x) / y - case uint16: - return uint16(x) / y - case uint32: - return uint32(x) / y - case uint64: - return uint64(x) / y - case int: - return int(x) / y - case int8: - return int8(x) / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case uint8: - switch y := b.(type) { - case uint: - return x / uint8(y) - case uint8: - return x / y - case uint16: - return uint16(x) / y - case uint32: - return uint32(x) / y - case uint64: - return uint64(x) / y - case int: - return int(x) / y - case int8: - return int8(x) / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case uint16: - switch y := b.(type) { - case uint: - return x / uint16(y) - case uint8: - return x / uint16(y) - case uint16: - return x / y - case uint32: - return uint32(x) / y - case uint64: - return uint64(x) / y - case int: - return int(x) / y - case int8: - return int8(x) / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return 
int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case uint32: - switch y := b.(type) { - case uint: - return x / uint32(y) - case uint8: - return x / uint32(y) - case uint16: - return x / uint32(y) - case uint32: - return x / y - case uint64: - return uint64(x) / y - case int: - return int(x) / y - case int8: - return int8(x) / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case uint64: - switch y := b.(type) { - case uint: - return x / uint64(y) - case uint8: - return x / uint64(y) - case uint16: - return x / uint64(y) - case uint32: - return x / uint64(y) - case uint64: - return x / y - case int: - return int(x) / y - case int8: - return int8(x) / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case int: - switch y := b.(type) { - case uint: - return x / int(y) - case uint8: - return x / int(y) - case uint16: - return x / int(y) - case uint32: - return x / int(y) - case uint64: - return x / int(y) - case int: - return x / y - case int8: - return int8(x) / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case int8: - switch y := b.(type) { - case uint: - return x / int8(y) - case uint8: - return x / int8(y) - case uint16: - return x / int8(y) - case uint32: - return x / int8(y) - case uint64: - return x / int8(y) - case int: - return x / int8(y) - case int8: - return x / y - case int16: - return int16(x) / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case int16: - switch y := b.(type) { - case uint: - return x / int16(y) - case uint8: - return x / int16(y) - case uint16: - return x / int16(y) - case uint32: - return x / int16(y) - case uint64: - return x / int16(y) - case int: - return x / int16(y) - case int8: - return x / int16(y) - case int16: - return x / y - case int32: - return int32(x) / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case int32: - switch y := b.(type) { - case uint: - return x / int32(y) - case uint8: - return x / int32(y) - case uint16: - return x / int32(y) - case uint32: - return x / int32(y) - case uint64: - return x / int32(y) - case int: - return x / int32(y) - case int8: - return x / int32(y) - case int16: - return x / int32(y) - case int32: - return x / y - case int64: - return int64(x) / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case int64: - switch y := b.(type) { - case uint: - return x / int64(y) - case uint8: - return x / int64(y) - case uint16: - return x / int64(y) - case uint32: - return x / int64(y) - case uint64: - return x / int64(y) - case int: - return x / int64(y) - case int8: - return x / int64(y) - case int16: - return x / int64(y) - case int32: - return x / int64(y) - case int64: - return x / y - case float32: - return float32(x) / y - case float64: - return float64(x) / y - } - case float32: - switch y := b.(type) { - case uint: - return x / float32(y) - case uint8: - return x / float32(y) - case uint16: - return x / 
float32(y) - case uint32: - return x / float32(y) - case uint64: - return x / float32(y) - case int: - return x / float32(y) - case int8: - return x / float32(y) - case int16: - return x / float32(y) - case int32: - return x / float32(y) - case int64: - return x / float32(y) - case float32: - return x / y - case float64: - return float64(x) / y - } - case float64: - switch y := b.(type) { - case uint: - return x / float64(y) - case uint8: - return x / float64(y) - case uint16: - return x / float64(y) - case uint32: - return x / float64(y) - case uint64: - return x / float64(y) - case int: - return x / float64(y) - case int8: - return x / float64(y) - case int16: - return x / float64(y) - case int32: - return x / float64(y) - case int64: - return x / float64(y) - case float32: - return x / float64(y) - case float64: - return x / y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "/", b)) -} - -func modulo(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return x % y - case uint8: - return uint8(x) % y - case uint16: - return uint16(x) % y - case uint32: - return uint32(x) % y - case uint64: - return uint64(x) % y - case int: - return int(x) % y - case int8: - return int8(x) % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case uint8: - switch y := b.(type) { - case uint: - return x % uint8(y) - case uint8: - return x % y - case uint16: - return uint16(x) % y - case uint32: - return uint32(x) % y - case uint64: - return uint64(x) % y - case int: - return int(x) % y - case int8: - return int8(x) % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case uint16: - switch y := b.(type) { - case uint: - return x % uint16(y) - case uint8: - return x % uint16(y) - case uint16: - return x % y - case uint32: - return uint32(x) % y - case uint64: - return uint64(x) % y - case int: - return int(x) % y - case int8: - return int8(x) % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case uint32: - switch y := b.(type) { - case uint: - return x % uint32(y) - case uint8: - return x % uint32(y) - case uint16: - return x % uint32(y) - case uint32: - return x % y - case uint64: - return uint64(x) % y - case int: - return int(x) % y - case int8: - return int8(x) % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case uint64: - switch y := b.(type) { - case uint: - return x % uint64(y) - case uint8: - return x % uint64(y) - case uint16: - return x % uint64(y) - case uint32: - return x % uint64(y) - case uint64: - return x % y - case int: - return int(x) % y - case int8: - return int8(x) % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case int: - switch y := b.(type) { - case uint: - return x % int(y) - case uint8: - return x % int(y) - case uint16: - return x % int(y) - case uint32: - return x % int(y) - case uint64: - return x % int(y) - case int: - return x % y - case int8: - return int8(x) % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case int8: - switch y := b.(type) { - case uint: - return x % int8(y) - case uint8: - return x % int8(y) - case uint16: - return x % int8(y) - case uint32: - return x % int8(y) - case uint64: - return x 
% int8(y) - case int: - return x % int8(y) - case int8: - return x % y - case int16: - return int16(x) % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case int16: - switch y := b.(type) { - case uint: - return x % int16(y) - case uint8: - return x % int16(y) - case uint16: - return x % int16(y) - case uint32: - return x % int16(y) - case uint64: - return x % int16(y) - case int: - return x % int16(y) - case int8: - return x % int16(y) - case int16: - return x % y - case int32: - return int32(x) % y - case int64: - return int64(x) % y - } - case int32: - switch y := b.(type) { - case uint: - return x % int32(y) - case uint8: - return x % int32(y) - case uint16: - return x % int32(y) - case uint32: - return x % int32(y) - case uint64: - return x % int32(y) - case int: - return x % int32(y) - case int8: - return x % int32(y) - case int16: - return x % int32(y) - case int32: - return x % y - case int64: - return int64(x) % y - } - case int64: - switch y := b.(type) { - case uint: - return x % int64(y) - case uint8: - return x % int64(y) - case uint16: - return x % int64(y) - case uint32: - return x % int64(y) - case uint64: - return x % int64(y) - case int: - return x % int64(y) - case int8: - return x % int64(y) - case int16: - return x % int64(y) - case int32: - return x % int64(y) - case int64: - return x % y - } - } - panic(fmt.Sprintf("invalid operation: %T %v %T", a, "%", b)) -} diff --git a/vendor/github.com/antonmedv/expr/vm/opcodes.go b/vendor/github.com/antonmedv/expr/vm/opcodes.go index 7f2dd37e9b..b3117e73c2 100644 --- a/vendor/github.com/antonmedv/expr/vm/opcodes.go +++ b/vendor/github.com/antonmedv/expr/vm/opcodes.go @@ -1,12 +1,19 @@ package vm +type Opcode byte + const ( - OpPush byte = iota + OpPush Opcode = iota + OpPushInt OpPop - OpRot + OpLoadConst + OpLoadField + OpLoadFast + OpLoadMethod + OpLoadFunc OpFetch - OpFetchNilSafe - OpFetchMap + OpFetchField + OpMethod OpTrue OpFalse OpNil @@ -18,6 +25,9 @@ const ( OpJump OpJumpIfTrue OpJumpIfFalse + OpJumpIfNil + OpJumpIfNotNil + OpJumpIfEnd OpJumpBackward OpIn OpLess @@ -36,21 +46,26 @@ const ( OpContains OpStartsWith OpEndsWith - OpIndex OpSlice - OpProperty - OpPropertyNilSafe OpCall + OpCall0 + OpCall1 + OpCall2 + OpCall3 + OpCallN OpCallFast - OpMethod - OpMethodNilSafe + OpCallTyped + OpBuiltin OpArray OpMap OpLen OpCast - OpStore - OpLoad - OpInc + OpDeref + OpIncrementIt + OpIncrementCount + OpGetCount + OpGetLen + OpPointer OpBegin OpEnd // This opcode must be at the end of this list. 
) diff --git a/vendor/github.com/antonmedv/expr/vm/program.go b/vendor/github.com/antonmedv/expr/vm/program.go index 5a41f8af4f..d424df14f4 100644 --- a/vendor/github.com/antonmedv/expr/vm/program.go +++ b/vendor/github.com/antonmedv/expr/vm/program.go @@ -1,83 +1,110 @@ package vm import ( - "encoding/binary" + "bytes" "fmt" + "reflect" "regexp" + "strings" + "text/tabwriter" + "github.com/antonmedv/expr/ast" + "github.com/antonmedv/expr/builtin" "github.com/antonmedv/expr/file" + "github.com/antonmedv/expr/vm/runtime" ) type Program struct { + Node ast.Node Source *file.Source - Locations map[int]file.Location + Locations []file.Location Constants []interface{} - Bytecode []byte + Bytecode []Opcode + Arguments []int + Functions []Function } func (program *Program) Disassemble() string { - out := "" + var buf bytes.Buffer + w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0) ip := 0 for ip < len(program.Bytecode) { pp := ip op := program.Bytecode[ip] - ip++ - - readArg := func() uint16 { - if ip+1 >= len(program.Bytecode) { - return 0 - } - - i := binary.LittleEndian.Uint16([]byte{program.Bytecode[ip], program.Bytecode[ip+1]}) - ip += 2 - return i - } + arg := program.Arguments[ip] + ip += 1 code := func(label string) { - out += fmt.Sprintf("%v\t%v\n", pp, label) + _, _ = fmt.Fprintf(w, "%v\t%v\n", pp, label) } jump := func(label string) { - a := readArg() - out += fmt.Sprintf("%v\t%v\t%v\t(%v)\n", pp, label, a, ip+int(a)) + _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t(%v)\n", pp, label, arg, ip+arg) } - back := func(label string) { - a := readArg() - out += fmt.Sprintf("%v\t%v\t%v\t(%v)\n", pp, label, a, ip-int(a)) + jumpBack := func(label string) { + _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t(%v)\n", pp, label, arg, ip-arg) } argument := func(label string) { - a := readArg() - out += fmt.Sprintf("%v\t%v\t%v\n", pp, label, a) + _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\n", pp, label, arg) } constant := func(label string) { - a := readArg() var c interface{} - if int(a) < len(program.Constants) { - c = program.Constants[a] + if arg < len(program.Constants) { + c = program.Constants[arg] + } else { + c = "out of range" } if r, ok := c.(*regexp.Regexp); ok { c = r.String() } - out += fmt.Sprintf("%v\t%v\t%v\t%#v\n", pp, label, a, c) + if field, ok := c.(*runtime.Field); ok { + c = fmt.Sprintf("{%v %v}", strings.Join(field.Path, "."), field.Index) + } + if method, ok := c.(*runtime.Method); ok { + c = fmt.Sprintf("{%v %v}", method.Name, method.Index) + } + _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t%v\n", pp, label, arg, c) + } + builtIn := func(label string) { + f, ok := builtin.Builtins[arg] + if !ok { + panic(fmt.Sprintf("unknown builtin %v", arg)) + } + _, _ = fmt.Fprintf(w, "%v\t%v\t%v\n", pp, "OpBuiltin", f.Name) } switch op { case OpPush: constant("OpPush") + case OpPushInt: + argument("OpPushInt") + case OpPop: code("OpPop") - case OpRot: - code("OpRot") + case OpLoadConst: + constant("OpLoadConst") + + case OpLoadField: + constant("OpLoadField") + + case OpLoadFast: + constant("OpLoadFast") + + case OpLoadMethod: + constant("OpLoadMethod") + + case OpLoadFunc: + argument("OpLoadFunc") case OpFetch: - constant("OpFetch") + code("OpFetch") - case OpFetchNilSafe: - constant("OpFetchNilSafe") + case OpFetchField: + constant("OpFetchField") - case OpFetchMap: - constant("OpFetchMap") + case OpMethod: + constant("OpMethod") case OpTrue: code("OpTrue") @@ -112,8 +139,17 @@ func (program *Program) Disassemble() string { case OpJumpIfFalse: jump("OpJumpIfFalse") + case OpJumpIfNil: + jump("OpJumpIfNil") + + case 
OpJumpIfNotNil: + jump("OpJumpIfNotNil") + + case OpJumpIfEnd: + jump("OpJumpIfEnd") + case OpJumpBackward: - back("OpJumpBackward") + jumpBack("OpJumpBackward") case OpIn: code("OpIn") @@ -166,29 +202,36 @@ func (program *Program) Disassemble() string { case OpEndsWith: code("OpEndsWith") - case OpIndex: - code("OpIndex") - case OpSlice: code("OpSlice") - case OpProperty: - constant("OpProperty") + case OpCall: + argument("OpCall") - case OpPropertyNilSafe: - constant("OpPropertyNilSafe") + case OpCall0: + argument("OpCall0") - case OpCall: - constant("OpCall") + case OpCall1: + argument("OpCall1") + + case OpCall2: + argument("OpCall2") + + case OpCall3: + argument("OpCall3") + + case OpCallN: + argument("OpCallN") case OpCallFast: - constant("OpCallFast") + argument("OpCallFast") - case OpMethod: - constant("OpMethod") + case OpCallTyped: + signature := reflect.TypeOf(FuncTypes[arg]).Elem().String() + _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t%v\n", pp, "OpCallTyped", arg, signature) - case OpMethodNilSafe: - constant("OpMethodNilSafe") + case OpBuiltin: + builtIn("OpBuiltin") case OpArray: code("OpArray") @@ -202,14 +245,23 @@ func (program *Program) Disassemble() string { case OpCast: argument("OpCast") - case OpStore: - constant("OpStore") + case OpDeref: + code("OpDeref") + + case OpIncrementIt: + code("OpIncrementIt") + + case OpIncrementCount: + code("OpIncrementCount") + + case OpGetCount: + code("OpGetCount") - case OpLoad: - constant("OpLoad") + case OpGetLen: + code("OpGetLen") - case OpInc: - constant("OpInc") + case OpPointer: + code("OpPointer") case OpBegin: code("OpBegin") @@ -218,8 +270,9 @@ func (program *Program) Disassemble() string { code("OpEnd") default: - out += fmt.Sprintf("%v\t%#x\n", pp, op) + _, _ = fmt.Fprintf(w, "%v\t%#x\n", ip, op) } } - return out + _ = w.Flush() + return buf.String() } diff --git a/vendor/github.com/antonmedv/expr/vm/runtime.go b/vendor/github.com/antonmedv/expr/vm/runtime.go deleted file mode 100644 index 2e0091314b..0000000000 --- a/vendor/github.com/antonmedv/expr/vm/runtime.go +++ /dev/null @@ -1,370 +0,0 @@ -package vm - -//go:generate go run ./generate - -import ( - "fmt" - "math" - "reflect" -) - -type Call struct { - Name string - Size int -} - -type Scope map[string]interface{} - -type Fetcher interface { - Fetch(interface{}) interface{} -} - -func fetch(from, i interface{}, nilsafe bool) interface{} { - if fetcher, ok := from.(Fetcher); ok { - value := fetcher.Fetch(i) - if value != nil { - return value - } - if !nilsafe { - panic(fmt.Sprintf("cannot fetch %v from %T", i, from)) - } - return nil - } - - v := reflect.ValueOf(from) - kind := v.Kind() - - // Structures can be access through a pointer or through a value, when they - // are accessed through a pointer we don't want to copy them to a value. 
- if kind == reflect.Ptr && reflect.Indirect(v).Kind() == reflect.Struct { - v = reflect.Indirect(v) - kind = v.Kind() - } - - switch kind { - - case reflect.Array, reflect.Slice, reflect.String: - value := v.Index(toInt(i)) - if value.IsValid() && value.CanInterface() { - return value.Interface() - } - - case reflect.Map: - value := v.MapIndex(reflect.ValueOf(i)) - if value.IsValid() { - if value.CanInterface() { - return value.Interface() - } - } else { - elem := reflect.TypeOf(from).Elem() - return reflect.Zero(elem).Interface() - } - - case reflect.Struct: - value := v.FieldByName(reflect.ValueOf(i).String()) - if value.IsValid() && value.CanInterface() { - return value.Interface() - } - } - if !nilsafe { - panic(fmt.Sprintf("cannot fetch %v from %T", i, from)) - } - return nil -} - -func slice(array, from, to interface{}) interface{} { - v := reflect.ValueOf(array) - - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - length := v.Len() - a, b := toInt(from), toInt(to) - - if b > length { - b = length - } - if a > b { - a = b - } - - value := v.Slice(a, b) - if value.IsValid() && value.CanInterface() { - return value.Interface() - } - - case reflect.Ptr: - value := v.Elem() - if value.IsValid() && value.CanInterface() { - return slice(value.Interface(), from, to) - } - - } - panic(fmt.Sprintf("cannot slice %v", from)) -} - -func FetchFn(from interface{}, name string) reflect.Value { - v := reflect.ValueOf(from) - - // Methods can be defined on any type. - if v.NumMethod() > 0 { - method := v.MethodByName(name) - if method.IsValid() { - return method - } - } - - d := v - if v.Kind() == reflect.Ptr { - d = v.Elem() - } - - switch d.Kind() { - case reflect.Map: - value := d.MapIndex(reflect.ValueOf(name)) - if value.IsValid() && value.CanInterface() { - return value.Elem() - } - case reflect.Struct: - // If struct has not method, maybe it has func field. - // To access this field we need dereference value. 
- value := d.FieldByName(name) - if value.IsValid() { - return value - } - } - panic(fmt.Sprintf(`cannot get "%v" from %T`, name, from)) -} - -func FetchFnNil(from interface{}, name string) reflect.Value { - if v := reflect.ValueOf(from); !v.IsValid() { - return v - } - return FetchFn(from, name) -} - -func in(needle interface{}, array interface{}) bool { - if array == nil { - return false - } - v := reflect.ValueOf(array) - - switch v.Kind() { - - case reflect.Array, reflect.Slice: - for i := 0; i < v.Len(); i++ { - value := v.Index(i) - if value.IsValid() && value.CanInterface() { - if equal(value.Interface(), needle).(bool) { - return true - } - } - } - return false - - case reflect.Map: - n := reflect.ValueOf(needle) - if !n.IsValid() { - panic(fmt.Sprintf("cannot use %T as index to %T", needle, array)) - } - value := v.MapIndex(n) - if value.IsValid() { - return true - } - return false - - case reflect.Struct: - n := reflect.ValueOf(needle) - if !n.IsValid() || n.Kind() != reflect.String { - panic(fmt.Sprintf("cannot use %T as field name of %T", needle, array)) - } - value := v.FieldByName(n.String()) - if value.IsValid() { - return true - } - return false - - case reflect.Ptr: - value := v.Elem() - if value.IsValid() && value.CanInterface() { - return in(needle, value.Interface()) - } - return false - } - - panic(fmt.Sprintf(`operator "in"" not defined on %T`, array)) -} - -func length(a interface{}) int { - v := reflect.ValueOf(a) - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return v.Len() - default: - panic(fmt.Sprintf("invalid argument for len (type %T)", a)) - } -} - -func negate(i interface{}) interface{} { - switch v := i.(type) { - case float32: - return -v - case float64: - return -v - - case int: - return -v - case int8: - return -v - case int16: - return -v - case int32: - return -v - case int64: - return -v - - case uint: - return -v - case uint8: - return -v - case uint16: - return -v - case uint32: - return -v - case uint64: - return -v - - default: - panic(fmt.Sprintf("invalid operation: - %T", v)) - } -} - -func exponent(a, b interface{}) float64 { - return math.Pow(toFloat64(a), toFloat64(b)) -} - -func makeRange(min, max int) []int { - size := max - min + 1 - if size <= 0 { - return []int{} - } - rng := make([]int, size) - for i := range rng { - rng[i] = min + i - } - return rng -} - -func toInt(a interface{}) int { - switch x := a.(type) { - case float32: - return int(x) - case float64: - return int(x) - - case int: - return x - case int8: - return int(x) - case int16: - return int(x) - case int32: - return int(x) - case int64: - return int(x) - - case uint: - return int(x) - case uint8: - return int(x) - case uint16: - return int(x) - case uint32: - return int(x) - case uint64: - return int(x) - - default: - panic(fmt.Sprintf("invalid operation: int(%T)", x)) - } -} - -func toInt64(a interface{}) int64 { - switch x := a.(type) { - case float32: - return int64(x) - case float64: - return int64(x) - - case int: - return int64(x) - case int8: - return int64(x) - case int16: - return int64(x) - case int32: - return int64(x) - case int64: - return x - - case uint: - return int64(x) - case uint8: - return int64(x) - case uint16: - return int64(x) - case uint32: - return int64(x) - case uint64: - return int64(x) - - default: - panic(fmt.Sprintf("invalid operation: int64(%T)", x)) - } -} - -func toFloat64(a interface{}) float64 { - switch x := a.(type) { - case float32: - return float64(x) - case float64: - return x - - case int: 
- return float64(x) - case int8: - return float64(x) - case int16: - return float64(x) - case int32: - return float64(x) - case int64: - return float64(x) - - case uint: - return float64(x) - case uint8: - return float64(x) - case uint16: - return float64(x) - case uint32: - return float64(x) - case uint64: - return float64(x) - - default: - panic(fmt.Sprintf("invalid operation: float64(%T)", x)) - } -} - -func isNil(v interface{}) bool { - if v == nil { - return true - } - r := reflect.ValueOf(v) - switch r.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: - return r.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/generated.go b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go new file mode 100644 index 0000000000..09a4a200ed --- /dev/null +++ b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go @@ -0,0 +1,3288 @@ +// Code generated by vm/runtime/helpers/main.go. DO NOT EDIT. + +package runtime + +import ( + "fmt" + "reflect" + "time" +) + +func Equal(a, b interface{}) bool { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case 
int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) == int(y) + case uint8: + return int(x) == int(y) + case uint16: + return int(x) == int(y) + case uint32: + return int(x) == int(y) + case uint64: + return int(x) == int(y) + case int: + return int(x) == int(y) + case int8: + return int(x) == int(y) + case int16: + return int(x) == int(y) + case int32: + return int(x) == int(y) + case int64: + return int(x) == int(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) == float64(y) + case uint8: + return float64(x) == float64(y) + case uint16: + return float64(x) == float64(y) + case uint32: + return float64(x) == float64(y) + case uint64: + return float64(x) == float64(y) + case int: + return float64(x) == float64(y) + case int8: + return float64(x) == float64(y) + case int16: + return float64(x) == float64(y) + case int32: + return float64(x) == float64(y) + case int64: + return float64(x) == float64(y) + case float32: + return 
float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) == float64(y) + case uint8: + return float64(x) == float64(y) + case uint16: + return float64(x) == float64(y) + case uint32: + return float64(x) == float64(y) + case uint64: + return float64(x) == float64(y) + case int: + return float64(x) == float64(y) + case int8: + return float64(x) == float64(y) + case int16: + return float64(x) == float64(y) + case int32: + return float64(x) == float64(y) + case int64: + return float64(x) == float64(y) + case float32: + return float64(x) == float64(y) + case float64: + return float64(x) == float64(y) + } + case string: + switch y := b.(type) { + case string: + return x == y + } + case time.Time: + switch y := b.(type) { + case time.Time: + return x.Equal(y) + } + } + if IsNil(a) && IsNil(b) { + return true + } + return reflect.DeepEqual(a, b) +} + +func Less(a, b interface{}) bool { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < 
int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) < int(y) + case uint8: + return int(x) < int(y) + case uint16: + return int(x) < int(y) + case uint32: + return int(x) < int(y) + case uint64: + return int(x) < int(y) + case int: + return int(x) < int(y) + case int8: + return int(x) < int(y) + case int16: + return int(x) < int(y) + case int32: + return int(x) < int(y) + case int64: + return int(x) < int(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) < float64(y) + case uint8: + return float64(x) < float64(y) + case uint16: + return float64(x) < float64(y) + case uint32: + return float64(x) < float64(y) + case uint64: + return float64(x) < float64(y) + case int: + return float64(x) < float64(y) + case int8: + return float64(x) < float64(y) + case int16: + return float64(x) < float64(y) + case int32: + return float64(x) < float64(y) + case int64: + return float64(x) < float64(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) < float64(y) + case uint8: + return float64(x) < float64(y) + case uint16: + return float64(x) < float64(y) + 
case uint32: + return float64(x) < float64(y) + case uint64: + return float64(x) < float64(y) + case int: + return float64(x) < float64(y) + case int8: + return float64(x) < float64(y) + case int16: + return float64(x) < float64(y) + case int32: + return float64(x) < float64(y) + case int64: + return float64(x) < float64(y) + case float32: + return float64(x) < float64(y) + case float64: + return float64(x) < float64(y) + } + case string: + switch y := b.(type) { + case string: + return x < y + } + case time.Time: + switch y := b.(type) { + case time.Time: + return x.Before(y) + } + } + panic(fmt.Sprintf("invalid operation: %T < %T", a, b)) +} + +func More(a, b interface{}) bool { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > 
int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) > int(y) + case uint8: + return int(x) > int(y) + case uint16: + return int(x) > int(y) + case uint32: + return int(x) > int(y) + case uint64: + return int(x) > int(y) + case int: + return int(x) > int(y) + case int8: + return int(x) > int(y) + case int16: + return int(x) > int(y) + case int32: + return int(x) > int(y) + case int64: + return int(x) > int(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) > float64(y) + case uint8: + return float64(x) > float64(y) + case uint16: + return float64(x) > float64(y) + case uint32: + return float64(x) > float64(y) + case uint64: + return float64(x) > float64(y) + case int: + return float64(x) > float64(y) + case int8: + return float64(x) > float64(y) + case int16: + return float64(x) > float64(y) + case int32: + return float64(x) > float64(y) + case int64: + return float64(x) > float64(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) > float64(y) + case uint8: + return float64(x) > float64(y) + case uint16: + return float64(x) > float64(y) + case uint32: + return float64(x) > float64(y) + case uint64: + return float64(x) > float64(y) + case int: + return float64(x) > float64(y) + case int8: + return float64(x) > float64(y) + case int16: + return float64(x) > float64(y) + case int32: + return float64(x) > float64(y) + case int64: + 
return float64(x) > float64(y) + case float32: + return float64(x) > float64(y) + case float64: + return float64(x) > float64(y) + } + case string: + switch y := b.(type) { + case string: + return x > y + } + case time.Time: + switch y := b.(type) { + case time.Time: + return x.After(y) + } + } + panic(fmt.Sprintf("invalid operation: %T > %T", a, b)) +} + +func LessOrEqual(a, b interface{}) bool { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + 
return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) <= int(y) + case uint8: + return int(x) <= int(y) + case uint16: + return int(x) <= int(y) + case uint32: + return int(x) <= int(y) + case uint64: + return int(x) <= int(y) + case int: + return int(x) <= int(y) + case int8: + return int(x) <= int(y) + case int16: + return int(x) <= int(y) + case int32: + return int(x) <= int(y) + case int64: + return int(x) <= int(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) <= float64(y) + case uint8: + return float64(x) <= float64(y) + case uint16: + return float64(x) <= float64(y) + case uint32: + return float64(x) <= float64(y) + case uint64: + return float64(x) <= float64(y) + case int: + return float64(x) <= float64(y) + case int8: + return float64(x) <= float64(y) + case int16: + return float64(x) <= float64(y) + case int32: + return float64(x) <= float64(y) + case int64: + return float64(x) <= float64(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) <= float64(y) + case uint8: + return float64(x) <= float64(y) + case uint16: + return float64(x) <= float64(y) + case uint32: + return float64(x) <= float64(y) + case uint64: + return float64(x) <= float64(y) + case int: + return float64(x) <= float64(y) + case int8: + return float64(x) <= float64(y) + case int16: + return float64(x) <= float64(y) + case int32: + return float64(x) <= float64(y) + case int64: + return float64(x) <= float64(y) + case float32: + return float64(x) <= float64(y) + case float64: + return float64(x) <= float64(y) + } + case 
string: + switch y := b.(type) { + case string: + return x <= y + } + case time.Time: + switch y := b.(type) { + case time.Time: + return x.Before(y) || x.Equal(y) + } + } + panic(fmt.Sprintf("invalid operation: %T <= %T", a, b)) +} + +func MoreOrEqual(a, b interface{}) bool { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } 
+ case int8: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) >= int(y) + case uint8: + return int(x) >= int(y) + case uint16: + return int(x) >= int(y) + case uint32: + return int(x) >= int(y) + case uint64: + return int(x) >= int(y) + case int: + return int(x) >= int(y) + case int8: + return int(x) >= int(y) + case int16: + return int(x) >= int(y) + case int32: + return int(x) >= int(y) + case int64: + return int(x) >= int(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) >= float64(y) + case uint8: + return float64(x) >= float64(y) + case uint16: + return float64(x) >= float64(y) + case uint32: + return float64(x) >= float64(y) + case uint64: + return float64(x) >= float64(y) + case int: + return float64(x) >= float64(y) + case int8: + return float64(x) >= float64(y) + case int16: + return float64(x) >= float64(y) + case int32: + return float64(x) >= float64(y) + case int64: + return float64(x) >= float64(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) >= float64(y) + case uint8: + return float64(x) >= float64(y) + case uint16: + return float64(x) >= float64(y) + case uint32: + return float64(x) >= float64(y) + case uint64: + return float64(x) >= float64(y) + case int: + return float64(x) >= float64(y) + case int8: + return float64(x) >= float64(y) + case int16: + return float64(x) >= float64(y) + case int32: + return float64(x) >= float64(y) + case int64: + return float64(x) >= float64(y) + case float32: + return float64(x) >= float64(y) + case float64: + return float64(x) >= float64(y) + } + case string: + switch y := b.(type) { + case string: + return x >= y + } + case time.Time: + switch y := b.(type) { + case time.Time: + 
return x.After(y) || x.Equal(y) + } + } + panic(fmt.Sprintf("invalid operation: %T >= %T", a, b)) +} + +func Add(a, b interface{}) interface{} { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case 
uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) + int(y) + case uint8: + return int(x) + int(y) + case uint16: + return int(x) + int(y) + case uint32: + return int(x) + int(y) + case uint64: + return int(x) + int(y) + case int: + return int(x) + int(y) + case int8: + return int(x) + int(y) + case int16: + return int(x) + int(y) + case int32: + return int(x) + int(y) + case int64: + return int(x) + int(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) + float64(y) + case uint8: + return float64(x) + float64(y) + case uint16: + return float64(x) + float64(y) + case uint32: + return float64(x) + float64(y) + case uint64: + return float64(x) + float64(y) + case int: + return float64(x) + float64(y) + case int8: + return float64(x) + float64(y) + case int16: + return float64(x) + float64(y) + case int32: + return float64(x) + float64(y) + case int64: + return float64(x) + float64(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) + float64(y) + case uint8: + return float64(x) + float64(y) + case uint16: + return float64(x) + float64(y) + case uint32: + return float64(x) + float64(y) + case uint64: + return float64(x) + float64(y) + case int: + return float64(x) + float64(y) + case int8: + return float64(x) + float64(y) + case int16: + return float64(x) + float64(y) + case int32: + return float64(x) + float64(y) + case int64: + return float64(x) + float64(y) + case float32: + return float64(x) + float64(y) + case float64: + return float64(x) + float64(y) + } + case string: + switch y := b.(type) { + case string: + return x + y + } + case time.Time: + switch y := b.(type) { + case time.Duration: + return x.Add(y) + } + case time.Duration: + switch y := b.(type) { + case time.Time: + return y.Add(x) + } + } + panic(fmt.Sprintf("invalid operation: %T + %T", a, b)) +} + +func Subtract(a, b interface{}) interface{} { + switch x := a.(type) { + case uint: + switch y := 
b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: 
+ return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) - int(y) + case uint8: + return int(x) - int(y) + case uint16: + return int(x) - int(y) + case uint32: + return int(x) - int(y) + case uint64: + return int(x) - int(y) + case int: + return int(x) - int(y) + case int8: + return int(x) - int(y) + case int16: + return int(x) - int(y) + case int32: + return int(x) - int(y) + case int64: + return int(x) - int(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) - float64(y) + case uint8: + return float64(x) - float64(y) + case uint16: + return float64(x) - float64(y) + case uint32: + return float64(x) - float64(y) + case uint64: + return float64(x) - float64(y) + case int: + return float64(x) - float64(y) + case int8: + return float64(x) - float64(y) + case int16: + return float64(x) - float64(y) + case int32: + return float64(x) - float64(y) + case int64: + return float64(x) - float64(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) - float64(y) + case uint8: + return float64(x) - float64(y) + case uint16: + return float64(x) - float64(y) + case uint32: + return float64(x) - float64(y) + case uint64: + return float64(x) - float64(y) + case int: + return float64(x) - float64(y) + case int8: + return float64(x) - float64(y) + case int16: + return float64(x) - float64(y) + case int32: + return float64(x) - float64(y) + case int64: + return float64(x) - float64(y) + case float32: + return float64(x) - float64(y) + case float64: + return float64(x) - float64(y) + } + case time.Time: + switch y := b.(type) { + case time.Time: + return x.Sub(y) + } + } + panic(fmt.Sprintf("invalid operation: %T - %T", a, b)) +} + +func Multiply(a, b interface{}) interface{} { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + 
case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) 
+ case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) * int(y) + case uint8: + return int(x) * int(y) + case uint16: + return int(x) * int(y) + case uint32: + return int(x) * int(y) + case uint64: + return int(x) * int(y) + case int: + return int(x) * int(y) + case int8: + return int(x) * int(y) + case int16: + return int(x) * int(y) + case int32: + return int(x) * int(y) + case int64: + return int(x) * int(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) * float64(y) + case uint8: + return float64(x) * float64(y) + case uint16: + return float64(x) * float64(y) + case uint32: + return float64(x) * float64(y) + case uint64: + return float64(x) * float64(y) + case int: + return float64(x) * float64(y) + case int8: + return float64(x) * float64(y) + case int16: + return float64(x) * float64(y) + case int32: + return float64(x) * float64(y) + case int64: + return float64(x) * float64(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) * float64(y) + case uint8: + return float64(x) * float64(y) + case uint16: + return float64(x) * float64(y) + case uint32: + return float64(x) * float64(y) + case uint64: + return float64(x) * float64(y) + case int: + return float64(x) * float64(y) + case int8: + return float64(x) * float64(y) + case int16: + return float64(x) * float64(y) + case int32: + return float64(x) * float64(y) + case int64: + return float64(x) * float64(y) + case float32: + return float64(x) * float64(y) + case float64: + return float64(x) * float64(y) + } + } + panic(fmt.Sprintf("invalid operation: %T * %T", a, b)) +} + +func Divide(a, b interface{}) float64 { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case uint8: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case 
uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case uint16: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case uint32: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case uint64: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case int: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case int8: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case int16: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / 
float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case int32: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case int64: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case float32: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + case float64: + switch y := b.(type) { + case uint: + return float64(x) / float64(y) + case uint8: + return float64(x) / float64(y) + case uint16: + return float64(x) / float64(y) + case uint32: + return float64(x) / float64(y) + case uint64: + return float64(x) / float64(y) + case int: + return float64(x) / float64(y) + case int8: + return float64(x) / float64(y) + case int16: + return float64(x) / float64(y) + case int32: + return float64(x) / float64(y) + case int64: + return float64(x) / float64(y) + case float32: + return float64(x) / float64(y) + case float64: + return float64(x) / float64(y) + } + } + panic(fmt.Sprintf("invalid operation: %T / %T", a, b)) +} + +func Modulo(a, b interface{}) int { + switch x := a.(type) { + case uint: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case uint8: + switch y := b.(type) { + case uint: + return 
int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case uint16: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case uint32: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case uint64: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case int: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case int8: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case int16: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case int32: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + case int64: + switch y := b.(type) { + case uint: + return int(x) % int(y) + case uint8: + return int(x) % int(y) + case uint16: + return int(x) % int(y) + 
case uint32: + return int(x) % int(y) + case uint64: + return int(x) % int(y) + case int: + return int(x) % int(y) + case int8: + return int(x) % int(y) + case int16: + return int(x) % int(y) + case int32: + return int(x) % int(y) + case int64: + return int(x) % int(y) + } + } + panic(fmt.Sprintf("invalid operation: %T %% %T", a, b)) +} diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go new file mode 100644 index 0000000000..b2eeb65d83 --- /dev/null +++ b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go @@ -0,0 +1,517 @@ +package runtime + +//go:generate sh -c "go run ./helpers > ./generated.go" + +import ( + "fmt" + "math" + "reflect" + "strconv" +) + +func Fetch(from, i interface{}) interface{} { + v := reflect.ValueOf(from) + kind := v.Kind() + if kind == reflect.Invalid { + panic(fmt.Sprintf("cannot fetch %v from %T", i, from)) + } + + // Methods can be defined on any type. + if v.NumMethod() > 0 { + if methodName, ok := i.(string); ok { + method := v.MethodByName(methodName) + if method.IsValid() { + return method.Interface() + } + } + } + + // Structs, maps, and slices can be access through a pointer or through + // a value, when they are accessed through a pointer we don't want to + // copy them to a value. + if kind == reflect.Ptr { + v = reflect.Indirect(v) + kind = v.Kind() + } + + // TODO: We can create separate opcodes for each of the cases below to make + // the little bit faster. + switch kind { + case reflect.Array, reflect.Slice, reflect.String: + index := ToInt(i) + if index < 0 { + index = v.Len() + index + } + value := v.Index(index) + if value.IsValid() { + return value.Interface() + } + + case reflect.Map: + var value reflect.Value + if i == nil { + value = v.MapIndex(reflect.Zero(v.Type().Key())) + } else { + value = v.MapIndex(reflect.ValueOf(i)) + } + if value.IsValid() { + return value.Interface() + } else { + elem := reflect.TypeOf(from).Elem() + return reflect.Zero(elem).Interface() + } + + case reflect.Struct: + fieldName := i.(string) + value := v.FieldByNameFunc(func(name string) bool { + field, _ := v.Type().FieldByName(name) + if field.Tag.Get("expr") == fieldName { + return true + } + return name == fieldName + }) + if value.IsValid() { + return value.Interface() + } + } + panic(fmt.Sprintf("cannot fetch %v from %T", i, from)) +} + +type Field struct { + Index []int + Path []string +} + +func FetchField(from interface{}, field *Field) interface{} { + v := reflect.ValueOf(from) + kind := v.Kind() + if kind != reflect.Invalid { + if kind == reflect.Ptr { + v = reflect.Indirect(v) + } + // We can use v.FieldByIndex here, but it will panic if the field + // is not exists. And we need to recover() to generate a more + // user-friendly error message. + // Also, our fieldByIndex() function is slightly faster than the + // v.FieldByIndex() function as we don't need to verify what a field + // is a struct as we already did it on compilation step. 
+ value := fieldByIndex(v, field) + if value.IsValid() { + return value.Interface() + } + } + panic(fmt.Sprintf("cannot get %v from %T", field.Path[0], from)) +} + +func fieldByIndex(v reflect.Value, field *Field) reflect.Value { + if len(field.Index) == 1 { + return v.Field(field.Index[0]) + } + for i, x := range field.Index { + if i > 0 { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + panic(fmt.Sprintf("cannot get %v from %v", field.Path[i], field.Path[i-1])) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} + +type Method struct { + Index int + Name string +} + +func FetchMethod(from interface{}, method *Method) interface{} { + v := reflect.ValueOf(from) + kind := v.Kind() + if kind != reflect.Invalid { + // Methods can be defined on any type, no need to dereference. + method := v.Method(method.Index) + if method.IsValid() { + return method.Interface() + } + } + panic(fmt.Sprintf("cannot fetch %v from %T", method.Name, from)) +} + +func Deref(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i) + + if v.Kind() == reflect.Interface { + if v.IsNil() { + return i + } + v = v.Elem() + } + + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return i + } + indirect := reflect.Indirect(v) + switch indirect.Kind() { + case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice: + default: + v = v.Elem() + } + } + + if v.IsValid() { + return v.Interface() + } + + panic(fmt.Sprintf("cannot dereference %v", i)) +} + +func Slice(array, from, to interface{}) interface{} { + v := reflect.ValueOf(array) + + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + length := v.Len() + a, b := ToInt(from), ToInt(to) + if a < 0 { + a = length + a + } + if b < 0 { + b = length + b + } + if b > length { + b = length + } + if a > b { + a = b + } + value := v.Slice(a, b) + if value.IsValid() { + return value.Interface() + } + + case reflect.Ptr: + value := v.Elem() + if value.IsValid() { + return Slice(value.Interface(), from, to) + } + + } + panic(fmt.Sprintf("cannot slice %v", from)) +} + +func In(needle interface{}, array interface{}) bool { + if array == nil { + return false + } + v := reflect.ValueOf(array) + + switch v.Kind() { + + case reflect.Array, reflect.Slice: + for i := 0; i < v.Len(); i++ { + value := v.Index(i) + if value.IsValid() { + if Equal(value.Interface(), needle) { + return true + } + } + } + return false + + case reflect.Map: + var value reflect.Value + if needle == nil { + value = v.MapIndex(reflect.Zero(v.Type().Key())) + } else { + n := reflect.ValueOf(needle) + if !n.IsValid() { + panic(fmt.Sprintf("cannot use %T as index to %T", needle, array)) + } + value = v.MapIndex(n) + } + if value.IsValid() { + return true + } + return false + + case reflect.Struct: + n := reflect.ValueOf(needle) + if !n.IsValid() || n.Kind() != reflect.String { + panic(fmt.Sprintf("cannot use %T as field name of %T", needle, array)) + } + value := v.FieldByName(n.String()) + if value.IsValid() { + return true + } + return false + + case reflect.Ptr: + value := v.Elem() + if value.IsValid() { + return In(needle, value.Interface()) + } + return false + } + + panic(fmt.Sprintf(`operator "in"" not defined on %T`, array)) +} + +func Len(a interface{}) interface{} { + v := reflect.ValueOf(a) + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return v.Len() + default: + panic(fmt.Sprintf("invalid argument for len (type %T)", a)) + } +} + +func Negate(i interface{}) interface{} { + switch v := i.(type) { + case 
float32: + return -v + case float64: + return -v + case int: + return -v + case int8: + return -v + case int16: + return -v + case int32: + return -v + case int64: + return -v + case uint: + return -v + case uint8: + return -v + case uint16: + return -v + case uint32: + return -v + case uint64: + return -v + default: + panic(fmt.Sprintf("invalid operation: - %T", v)) + } +} + +func Exponent(a, b interface{}) float64 { + return math.Pow(ToFloat64(a), ToFloat64(b)) +} + +func MakeRange(min, max int) []int { + size := max - min + 1 + if size <= 0 { + return []int{} + } + rng := make([]int, size) + for i := range rng { + rng[i] = min + i + } + return rng +} + +func ToInt(a interface{}) int { + switch x := a.(type) { + case float32: + return int(x) + case float64: + return int(x) + case int: + return x + case int8: + return int(x) + case int16: + return int(x) + case int32: + return int(x) + case int64: + return int(x) + case uint: + return int(x) + case uint8: + return int(x) + case uint16: + return int(x) + case uint32: + return int(x) + case uint64: + return int(x) + case string: + i, err := strconv.Atoi(x) + if err != nil { + panic(fmt.Sprintf("invalid operation: int(%s)", x)) + } + return i + default: + panic(fmt.Sprintf("invalid operation: int(%T)", x)) + } +} + +func ToInt64(a interface{}) int64 { + switch x := a.(type) { + case float32: + return int64(x) + case float64: + return int64(x) + case int: + return int64(x) + case int8: + return int64(x) + case int16: + return int64(x) + case int32: + return int64(x) + case int64: + return x + case uint: + return int64(x) + case uint8: + return int64(x) + case uint16: + return int64(x) + case uint32: + return int64(x) + case uint64: + return int64(x) + default: + panic(fmt.Sprintf("invalid operation: int64(%T)", x)) + } +} + +func ToFloat64(a interface{}) float64 { + switch x := a.(type) { + case float32: + return float64(x) + case float64: + return x + case int: + return float64(x) + case int8: + return float64(x) + case int16: + return float64(x) + case int32: + return float64(x) + case int64: + return float64(x) + case uint: + return float64(x) + case uint8: + return float64(x) + case uint16: + return float64(x) + case uint32: + return float64(x) + case uint64: + return float64(x) + case string: + f, err := strconv.ParseFloat(x, 64) + if err != nil { + panic(fmt.Sprintf("invalid operation: float(%s)", x)) + } + return f + default: + panic(fmt.Sprintf("invalid operation: float(%T)", x)) + } +} + +func IsNil(v interface{}) bool { + if v == nil { + return true + } + r := reflect.ValueOf(v) + switch r.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + return r.IsNil() + default: + return false + } +} + +func Abs(x interface{}) interface{} { + switch x.(type) { + case float32: + if x.(float32) < 0 { + return -x.(float32) + } else { + return x + } + case float64: + if x.(float64) < 0 { + return -x.(float64) + } else { + return x + } + case int: + if x.(int) < 0 { + return -x.(int) + } else { + return x + } + case int8: + if x.(int8) < 0 { + return -x.(int8) + } else { + return x + } + case int16: + if x.(int16) < 0 { + return -x.(int16) + } else { + return x + } + case int32: + if x.(int32) < 0 { + return -x.(int32) + } else { + return x + } + case int64: + if x.(int64) < 0 { + return -x.(int64) + } else { + return x + } + case uint: + if x.(uint) < 0 { + return -x.(uint) + } else { + return x + } + case uint8: + if x.(uint8) < 0 { + return -x.(uint8) + } else { + return x + } + case uint16: 
+ if x.(uint16) < 0 { + return -x.(uint16) + } else { + return x + } + case uint32: + if x.(uint32) < 0 { + return -x.(uint32) + } else { + return x + } + case uint64: + if x.(uint64) < 0 { + return -x.(uint64) + } else { + return x + } + } + panic(fmt.Sprintf("invalid argument for abs (type %T)", x)) +} diff --git a/vendor/github.com/antonmedv/expr/vm/vm.go b/vendor/github.com/antonmedv/expr/vm/vm.go index 6957dfa64d..af4fc5bf75 100644 --- a/vendor/github.com/antonmedv/expr/vm/vm.go +++ b/vendor/github.com/antonmedv/expr/vm/vm.go @@ -1,19 +1,22 @@ package vm +//go:generate sh -c "go run ./func_types > ./generated.go" + import ( "fmt" "reflect" "regexp" "strings" + "github.com/antonmedv/expr/builtin" "github.com/antonmedv/expr/file" + "github.com/antonmedv/expr/vm/runtime" ) +var MemoryBudget int = 1e6 var errorType = reflect.TypeOf((*error)(nil)).Elem() -var ( - MemoryBudget int = 1e6 -) +type Function = func(params ...interface{}) (interface{}, error) func Run(program *Program, env interface{}) (interface{}, error) { if program == nil { @@ -25,17 +28,21 @@ func Run(program *Program, env interface{}) (interface{}, error) { } type VM struct { - stack []interface{} - constants []interface{} - bytecode []byte - ip int - pp int - scopes []Scope - debug bool - step chan struct{} - curr chan int - memory int - limit int + stack []interface{} + ip int + scopes []*Scope + debug bool + step chan struct{} + curr chan int + memory int + memoryBudget int +} + +type Scope struct { + Array reflect.Value + It int + Len int + Count int } func Debug() *VM { @@ -47,21 +54,20 @@ func Debug() *VM { return vm } -func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error) { +func (vm *VM) Run(program *Program, env interface{}) (_ interface{}, err error) { defer func() { if r := recover(); r != nil { f := &file.Error{ - Location: program.Locations[vm.pp], + Location: program.Locations[vm.ip-1], Message: fmt.Sprintf("%v", r), } + if err, ok := r.(error); ok { + f.Wrap(err) + } err = f.Bind(program.Source) } }() - vm.limit = MemoryBudget - vm.ip = 0 - vm.pp = 0 - if vm.stack == nil { vm.stack = make([]interface{}, 0, 2) } else { @@ -72,41 +78,54 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error vm.scopes = vm.scopes[0:0] } - vm.bytecode = program.Bytecode - vm.constants = program.Constants - - for vm.ip < len(vm.bytecode) { + vm.memoryBudget = MemoryBudget + vm.memory = 0 + vm.ip = 0 + for vm.ip < len(program.Bytecode) { if vm.debug { <-vm.step } - vm.pp = vm.ip - vm.ip++ - op := vm.bytecode[vm.pp] + op := program.Bytecode[vm.ip] + arg := program.Arguments[vm.ip] + vm.ip += 1 switch op { case OpPush: - vm.push(vm.constant()) + vm.push(program.Constants[arg]) case OpPop: vm.pop() - case OpRot: - b := vm.pop() - a := vm.pop() - vm.push(b) - vm.push(a) + case OpLoadConst: + vm.push(runtime.Fetch(env, program.Constants[arg])) + + case OpLoadField: + vm.push(runtime.FetchField(env, program.Constants[arg].(*runtime.Field))) + + case OpLoadFast: + vm.push(env.(map[string]interface{})[program.Constants[arg].(string)]) + + case OpLoadMethod: + vm.push(runtime.FetchMethod(env, program.Constants[arg].(*runtime.Method))) + + case OpLoadFunc: + vm.push(program.Functions[arg]) case OpFetch: - vm.push(fetch(env, vm.constant(), false)) + b := vm.pop() + a := vm.pop() + vm.push(runtime.Fetch(a, b)) - case OpFetchNilSafe: - vm.push(fetch(env, vm.constant(), true)) + case OpFetchField: + a := vm.pop() + vm.push(runtime.FetchField(a, program.Constants[arg].(*runtime.Field))) - 
case OpFetchMap: - vm.push(env.(map[string]interface{})[vm.constant().(string)]) + case OpMethod: + a := vm.pop() + vm.push(runtime.FetchMethod(a, program.Constants[arg].(*runtime.Method))) case OpTrue: vm.push(true) @@ -118,7 +137,7 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error vm.push(nil) case OpNegate: - v := negate(vm.pop()) + v := runtime.Negate(vm.pop()) vm.push(v) case OpNot: @@ -128,7 +147,7 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error case OpEqual: b := vm.pop() a := vm.pop() - vm.push(equal(a, b)) + vm.push(runtime.Equal(a, b)) case OpEqualInt: b := vm.pop() @@ -141,90 +160,102 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error vm.push(a.(string) == b.(string)) case OpJump: - offset := vm.arg() - vm.ip += int(offset) + vm.ip += arg case OpJumpIfTrue: - offset := vm.arg() if vm.current().(bool) { - vm.ip += int(offset) + vm.ip += arg } case OpJumpIfFalse: - offset := vm.arg() if !vm.current().(bool) { - vm.ip += int(offset) + vm.ip += arg + } + + case OpJumpIfNil: + if runtime.IsNil(vm.current()) { + vm.ip += arg + } + + case OpJumpIfNotNil: + if !runtime.IsNil(vm.current()) { + vm.ip += arg + } + + case OpJumpIfEnd: + scope := vm.Scope() + if scope.It >= scope.Len { + vm.ip += arg } case OpJumpBackward: - offset := vm.arg() - vm.ip -= int(offset) + vm.ip -= arg case OpIn: b := vm.pop() a := vm.pop() - vm.push(in(a, b)) + vm.push(runtime.In(a, b)) case OpLess: b := vm.pop() a := vm.pop() - vm.push(less(a, b)) + vm.push(runtime.Less(a, b)) case OpMore: b := vm.pop() a := vm.pop() - vm.push(more(a, b)) + vm.push(runtime.More(a, b)) case OpLessOrEqual: b := vm.pop() a := vm.pop() - vm.push(lessOrEqual(a, b)) + vm.push(runtime.LessOrEqual(a, b)) case OpMoreOrEqual: b := vm.pop() a := vm.pop() - vm.push(moreOrEqual(a, b)) + vm.push(runtime.MoreOrEqual(a, b)) case OpAdd: b := vm.pop() a := vm.pop() - vm.push(add(a, b)) + vm.push(runtime.Add(a, b)) case OpSubtract: b := vm.pop() a := vm.pop() - vm.push(subtract(a, b)) + vm.push(runtime.Subtract(a, b)) case OpMultiply: b := vm.pop() a := vm.pop() - vm.push(multiply(a, b)) + vm.push(runtime.Multiply(a, b)) case OpDivide: b := vm.pop() a := vm.pop() - vm.push(divide(a, b)) + vm.push(runtime.Divide(a, b)) case OpModulo: b := vm.pop() a := vm.pop() - vm.push(modulo(a, b)) + vm.push(runtime.Modulo(a, b)) case OpExponent: b := vm.pop() a := vm.pop() - vm.push(exponent(a, b)) + vm.push(runtime.Exponent(a, b)) case OpRange: b := vm.pop() a := vm.pop() - min := toInt(a) - max := toInt(b) + min := runtime.ToInt(a) + max := runtime.ToInt(b) size := max - min + 1 - if vm.memory+size >= vm.limit { + if vm.memory+size >= vm.memoryBudget { panic("memory budget exceeded") } - vm.push(makeRange(min, max)) + vm.push(runtime.MakeRange(min, max)) vm.memory += size case OpMatches: @@ -239,7 +270,7 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error case OpMatchesConst: a := vm.pop() - r := vm.constant().(*regexp.Regexp) + r := program.Constants[arg].(*regexp.Regexp) vm.push(r.MatchString(a.(string))) case OpContains: @@ -257,31 +288,17 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error a := vm.pop() vm.push(strings.HasSuffix(a.(string), b.(string))) - case OpIndex: - b := vm.pop() - a := vm.pop() - vm.push(fetch(a, b, false)) - case OpSlice: from := vm.pop() to := vm.pop() node := vm.pop() - vm.push(slice(node, from, to)) - - case OpProperty: - a := vm.pop() - b := vm.constant() - 
vm.push(fetch(a, b, false))
-
-		case OpPropertyNilSafe:
-			a := vm.pop()
-			b := vm.constant()
-			vm.push(fetch(a, b, true))
+			vm.push(runtime.Slice(node, from, to))

 		case OpCall:
-			call := vm.constant().(Call)
-			in := make([]reflect.Value, call.Size)
-			for i := call.Size - 1; i >= 0; i-- {
+			fn := reflect.ValueOf(vm.pop())
+			size := arg
+			in := make([]reflect.Value, size)
+			for i := int(size) - 1; i >= 0; i-- {
 				param := vm.pop()
 				if param == nil && reflect.TypeOf(param) == nil {
 					// In case of nil value and nil type use this hack,
@@ -291,68 +308,72 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error
 					in[i] = reflect.ValueOf(param)
 				}
 			}
-			out := FetchFn(env, call.Name).Call(in)
+			out := fn.Call(in)
 			if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() {
-				return nil, out[1].Interface().(error)
+				panic(out[1].Interface().(error))
 			}
 			vm.push(out[0].Interface())

-		case OpCallFast:
-			call := vm.constant().(Call)
-			in := make([]interface{}, call.Size)
-			for i := call.Size - 1; i >= 0; i-- {
-				in[i] = vm.pop()
+		case OpCall0:
+			out, err := program.Functions[arg]()
+			if err != nil {
+				panic(err)
 			}
-			fn := FetchFn(env, call.Name).Interface()
-			if typed, ok := fn.(func(...interface{}) interface{}); ok {
-				vm.push(typed(in...))
-			} else if typed, ok := fn.(func(...interface{}) (interface{}, error)); ok {
-				res, err := typed(in...)
-				if err != nil {
-					return nil, err
-				}
-				vm.push(res)
+			vm.push(out)
+
+		case OpCall1:
+			a := vm.pop()
+			out, err := program.Functions[arg](a)
+			if err != nil {
+				panic(err)
 			}
+			vm.push(out)

-		case OpMethod:
-			call := vm.constants[vm.arg()].(Call)
-			in := make([]reflect.Value, call.Size)
-			for i := call.Size - 1; i >= 0; i-- {
-				param := vm.pop()
-				if param == nil && reflect.TypeOf(param) == nil {
-					// In case of nil value and nil type use this hack,
-					// otherwise reflect.Call will panic on zero value.
-					in[i] = reflect.ValueOf(&param).Elem()
-				} else {
-					in[i] = reflect.ValueOf(param)
-				}
+		case OpCall2:
+			b := vm.pop()
+			a := vm.pop()
+			out, err := program.Functions[arg](a, b)
+			if err != nil {
+				panic(err)
 			}
-			out := FetchFn(vm.pop(), call.Name).Call(in)
-			if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() {
-				return nil, out[1].Interface().(error)
+			vm.push(out)
+
+		case OpCall3:
+			c := vm.pop()
+			b := vm.pop()
+			a := vm.pop()
+			out, err := program.Functions[arg](a, b, c)
+			if err != nil {
+				panic(err)
 			}
-			vm.push(out[0].Interface())
+			vm.push(out)

-		case OpMethodNilSafe:
-			call := vm.constants[vm.arg()].(Call)
-			in := make([]reflect.Value, call.Size)
-			for i := call.Size - 1; i >= 0; i-- {
-				param := vm.pop()
-				if param == nil && reflect.TypeOf(param) == nil {
-					// In case of nil value and nil type use this hack,
-					// otherwise reflect.Call will panic on zero value.
-					in[i] = reflect.ValueOf(&param).Elem()
-				} else {
-					in[i] = reflect.ValueOf(param)
-				}
+		case OpCallN:
+			fn := vm.pop().(Function)
+			size := arg
+			in := make([]interface{}, size)
+			for i := int(size) - 1; i >= 0; i-- {
+				in[i] = vm.pop()
+			}
+			out, err := fn(in...)
+ if err != nil { + panic(err) } - fn := FetchFnNil(vm.pop(), call.Name) - if !fn.IsValid() { - vm.push(nil) - } else { - out := fn.Call(in) - vm.push(out[0].Interface()) + vm.push(out) + + case OpCallFast: + fn := vm.pop().(func(...interface{}) interface{}) + size := arg + in := make([]interface{}, size) + for i := int(size) - 1; i >= 0; i-- { + in[i] = vm.pop() } + vm.push(fn(in...)) + + case OpCallTyped: + fn := vm.pop() + out := vm.call(fn, arg) + vm.push(out) case OpArray: size := vm.pop().(int) @@ -362,7 +383,7 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error } vm.push(array) vm.memory += size - if vm.memory >= vm.limit { + if vm.memory >= vm.memoryBudget { panic("memory budget exceeded") } @@ -376,47 +397,77 @@ func (vm *VM) Run(program *Program, env interface{}) (out interface{}, err error } vm.push(m) vm.memory += size - if vm.memory >= vm.limit { + if vm.memory >= vm.memoryBudget { panic("memory budget exceeded") } case OpLen: - vm.push(length(vm.current())) + vm.push(runtime.Len(vm.current())) case OpCast: - t := vm.arg() + t := arg switch t { case 0: - vm.push(toInt64(vm.pop())) + vm.push(runtime.ToInt(vm.pop())) case 1: - vm.push(toFloat64(vm.pop())) + vm.push(runtime.ToInt64(vm.pop())) + case 2: + vm.push(runtime.ToFloat64(vm.pop())) } - case OpStore: + case OpDeref: + a := vm.pop() + vm.push(runtime.Deref(a)) + + case OpIncrementIt: + scope := vm.Scope() + scope.It++ + + case OpIncrementCount: + scope := vm.Scope() + scope.Count++ + + case OpGetCount: scope := vm.Scope() - key := vm.constant().(string) - value := vm.pop() - scope[key] = value + vm.push(scope.Count) - case OpLoad: + case OpGetLen: scope := vm.Scope() - key := vm.constant().(string) - vm.push(scope[key]) + vm.push(scope.Len) - case OpInc: + case OpPointer: scope := vm.Scope() - key := vm.constant().(string) - i := scope[key].(int) - i++ - scope[key] = i + vm.push(scope.Array.Index(scope.It).Interface()) case OpBegin: - scope := make(Scope) - vm.scopes = append(vm.scopes, scope) + a := vm.pop() + array := reflect.ValueOf(a) + vm.scopes = append(vm.scopes, &Scope{ + Array: array, + Len: array.Len(), + }) case OpEnd: vm.scopes = vm.scopes[:len(vm.scopes)-1] + case OpBuiltin: + switch arg { + case builtin.Len: + vm.push(runtime.Len(vm.pop())) + + case builtin.Abs: + vm.push(runtime.Abs(vm.pop())) + + case builtin.Int: + vm.push(runtime.ToInt(vm.pop())) + + case builtin.Float: + vm.push(runtime.ToFloat64(vm.pop())) + + default: + panic(fmt.Sprintf("unknown builtin %v", arg)) + } + default: panic(fmt.Sprintf("unknown bytecode %#x", op)) } @@ -452,21 +503,11 @@ func (vm *VM) pop() interface{} { return value } -func (vm *VM) arg() uint16 { - b0, b1 := vm.bytecode[vm.ip], vm.bytecode[vm.ip+1] - vm.ip += 2 - return uint16(b0) | uint16(b1)<<8 -} - -func (vm *VM) constant() interface{} { - return vm.constants[vm.arg()] -} - func (vm *VM) Stack() []interface{} { return vm.stack } -func (vm *VM) Scope() Scope { +func (vm *VM) Scope() *Scope { if len(vm.scopes) > 0 { return vm.scopes[len(vm.scopes)-1] } @@ -474,9 +515,7 @@ func (vm *VM) Scope() Scope { } func (vm *VM) Step() { - if vm.ip < len(vm.bytecode) { - vm.step <- struct{}{} - } + vm.step <- struct{}{} } func (vm *VM) Position() chan int { diff --git a/vendor/github.com/argoproj/argo-cd/v2/common/common.go b/vendor/github.com/argoproj/argo-cd/v2/common/common.go index d4e5dbca89..b52fc85990 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/common/common.go +++ b/vendor/github.com/argoproj/argo-cd/v2/common/common.go @@ -29,9 +29,9 
@@ const ( ArgoCDNotificationsConfigMapName = "argocd-notifications-cm" ArgoCDNotificationsSecretName = "argocd-notifications-secret" ArgoCDRBACConfigMapName = "argocd-rbac-cm" - // Contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods + // ArgoCDKnownHostsConfigMapName contains SSH known hosts data for connecting repositories. Will get mounted as volume to pods ArgoCDKnownHostsConfigMapName = "argocd-ssh-known-hosts-cm" - // Contains TLS certificate data for connecting repositories. Will get mounted as volume to pods + // ArgoCDTLSCertsConfigMapName contains TLS certificate data for connecting repositories. Will get mounted as volume to pods ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm" ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm" ) @@ -51,28 +51,32 @@ const ( DefaultPortRepoServerMetrics = 8084 ) -// Default listener address for ArgoCD components +// DefaultAddressAPIServer for ArgoCD components const ( - DefaultAddressAPIServer = "localhost" + DefaultAddressAdminDashboard = "localhost" + DefaultAddressAPIServer = "0.0.0.0" + DefaultAddressAPIServerMetrics = "0.0.0.0" + DefaultAddressRepoServer = "0.0.0.0" + DefaultAddressRepoServerMetrics = "0.0.0.0" ) // Default paths on the pod's file system const ( - // The default path where TLS certificates for repositories are located + // DefaultPathTLSConfig is the default path where TLS certificates for repositories are located DefaultPathTLSConfig = "/app/config/tls" - // The default path where SSH known hosts are stored + // DefaultPathSSHConfig is the default path where SSH known hosts are stored DefaultPathSSHConfig = "/app/config/ssh" - // Default name for the SSH known hosts file + // DefaultSSHKnownHostsName is the Default name for the SSH known hosts file DefaultSSHKnownHostsName = "ssh_known_hosts" - // Default path to GnuPG home directory + // DefaultGnuPgHomePath is the Default path to GnuPG home directory DefaultGnuPgHomePath = "/app/config/gpg/keys" - // Default path to repo server TLS endpoint config + // DefaultAppConfigPath is the Default path to repo server TLS endpoint config DefaultAppConfigPath = "/app/config" - // Default path to cmp server plugin socket file + // DefaultPluginSockFilePath is the Default path to cmp server plugin socket file DefaultPluginSockFilePath = "/home/argocd/cmp-server/plugins" - // Default path to cmp server plugin configuration file + // DefaultPluginConfigFilePath is the Default path to cmp server plugin configuration file DefaultPluginConfigFilePath = "/home/argocd/cmp-server/config" - // Plugin Config File is a ConfigManagementPlugin manifest located inside the plugin container + // PluginConfigFileName is the Plugin Config File is a ConfigManagementPlugin manifest located inside the plugin container PluginConfigFileName = "plugin.yaml" ) @@ -99,6 +103,12 @@ const ( // PasswordPatten is the default password patten PasswordPatten = `^.{8,32}$` + + // LegacyShardingAlgorithm is the default value for Sharding Algorithm it uses an `uid` based distribution (non-uniform) + LegacyShardingAlgorithm = "legacy" + // RoundRobinShardingAlgorithm is a flag value that can be opted for Sharding Algorithm it uses an equal distribution accross all shards + RoundRobinShardingAlgorithm = "round-robin" + DefaultShardingAlgorithm = LegacyShardingAlgorithm ) // Dex related constants @@ -139,7 +149,7 @@ const ( // LabelValueSecretTypeRepoCreds indicates a secret type of repository credentials LabelValueSecretTypeRepoCreds = "repo-creds" - // The Argo CD application 
name is used as the instance name + // AnnotationKeyAppInstance is the Argo CD application name is used as the instance name AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id" // AnnotationCompareOptions is a comma-separated list of options for comparison @@ -159,6 +169,10 @@ const ( // Ex: "http://grafana.example.com/d/yu5UH4MMz/deployments" // Ex: "Go to Dashboard|http://grafana.example.com/d/yu5UH4MMz/deployments" AnnotationKeyLinkPrefix = "link.argocd.argoproj.io/" + + // AnnotationKeyAppSkipReconcile tells the Application to skip the Application controller reconcile. + // Skip reconcile when the value is "true" or any other string values that can be strconv.ParseBool() to be true. + AnnotationKeyAppSkipReconcile = "argocd.argoproj.io/skip-reconcile" ) // Environment variables for tuning and debugging Argo CD @@ -167,19 +181,19 @@ const ( EnvVarSSODebug = "ARGOCD_SSO_DEBUG" // EnvVarRBACDebug is an environment variable to enable additional RBAC debugging in the API server EnvVarRBACDebug = "ARGOCD_RBAC_DEBUG" - // Overrides the location where SSH known hosts for repo access data is stored + // EnvVarSSHDataPath overrides the location where SSH known hosts for repo access data is stored EnvVarSSHDataPath = "ARGOCD_SSH_DATA_PATH" - // Overrides the location where TLS certificate for repo access data is stored + // EnvVarTLSDataPath overrides the location where TLS certificate for repo access data is stored EnvVarTLSDataPath = "ARGOCD_TLS_DATA_PATH" - // Specifies number of git remote operations attempts count + // EnvGitAttemptsCount specifies number of git remote operations attempts count EnvGitAttemptsCount = "ARGOCD_GIT_ATTEMPTS_COUNT" - // Specifices max duration of git remote operation retry + // EnvGitRetryMaxDuration specifices max duration of git remote operation retry EnvGitRetryMaxDuration = "ARGOCD_GIT_RETRY_MAX_DURATION" - // Specifies duration of git remote operation retry + // EnvGitRetryDuration specifies duration of git remote operation retry EnvGitRetryDuration = "ARGOCD_GIT_RETRY_DURATION" - // Specifies fator of git remote operation retry + // EnvGitRetryFactor specifies fator of git remote operation retry EnvGitRetryFactor = "ARGOCD_GIT_RETRY_FACTOR" - // Overrides git submodule support, true by default + // EnvGitSubmoduleEnabled overrides git submodule support, true by default EnvGitSubmoduleEnabled = "ARGOCD_GIT_MODULES_ENABLED" // EnvGnuPGHome is the path to ArgoCD's GnuPG keyring for signature verification EnvGnuPGHome = "ARGOCD_GNUPGHOME" @@ -195,13 +209,15 @@ const ( EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS" // EnvControllerShard is the shard number that should be handled by controller EnvControllerShard = "ARGOCD_CONTROLLER_SHARD" + // EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin + EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM" // EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM" // EnvGithubAppCredsExpirationDuration controls the caching of Github app credentials. 
This value is in minutes (default: 60) EnvGithubAppCredsExpirationDuration = "ARGOCD_GITHUB_APP_CREDS_EXPIRATION_DURATION" // EnvHelmIndexCacheDuration controls how the helm repository index file is cached for (default: 0) EnvHelmIndexCacheDuration = "ARGOCD_HELM_INDEX_CACHE_DURATION" - // EnvRepoServerConfigPath allows to override the configuration path for repo server + // EnvAppConfigPath allows to override the configuration path for repo server EnvAppConfigPath = "ARGOCD_APP_CONF_PATH" // EnvLogFormat log format that is defined by `--logformat` option EnvLogFormat = "ARGOCD_LOG_FORMAT" @@ -215,6 +231,8 @@ const ( EnvCMPChunkSize = "ARGOCD_CMP_CHUNK_SIZE" // EnvCMPWorkDir defines the full path of the work directory used by the CMP server EnvCMPWorkDir = "ARGOCD_CMP_WORKDIR" + // EnvGPGDataPath overrides the location where GPG keyring for signature verification is stored + EnvGPGDataPath = "ARGOCD_GPG_DATA_PATH" ) // Config Management Plugin related constants @@ -292,14 +310,14 @@ func GetCMPWorkDir() string { } const ( - // AnnotationApplicationRefresh is an annotation that is added when an ApplicationSet is requested to be refreshed by a webhook. The ApplicationSet controller will remove this annotation at the end of reconciliation. + // AnnotationApplicationSetRefresh is an annotation that is added when an ApplicationSet is requested to be refreshed by a webhook. The ApplicationSet controller will remove this annotation at the end of reconciliation. AnnotationApplicationSetRefresh = "argocd.argoproj.io/application-set-refresh" ) // gRPC settings const ( GRPCKeepAliveEnforcementMinimum = 10 * time.Second - // Keep alive is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors + // GRPCKeepAliveTime is 2x enforcement minimum to ensure network jitter does not introduce ENHANCE_YOUR_CALM errors GRPCKeepAliveTime = 2 * GRPCKeepAliveEnforcementMinimum ) @@ -317,7 +335,7 @@ const ( SecurityLow = 1 // Unexceptional entries (i.e. successful access logs) ) -// Common error messages +// TokenVerificationError is a generic error message for a failure to verify a JWT const TokenVerificationError = "failed to verify the token" var TokenVerificationErr = errors.New(TokenVerificationError) diff --git a/vendor/github.com/argoproj/argo-cd/v2/common/version.go b/vendor/github.com/argoproj/argo-cd/v2/common/version.go index 8598f98c31..e8caf37a30 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/common/version.go +++ b/vendor/github.com/argoproj/argo-cd/v2/common/version.go @@ -16,6 +16,7 @@ var ( gitTag = "" // output from `git describe --exact-match --tags HEAD` (if clean tree state) gitTreeState = "" // determined from `git status --porcelain`. 
either 'clean' or 'dirty' kubectlVersion = "" // determined from go.mod file + extraBuildInfo = "" // extra build information for vendors to populate during build ) // Version contains Argo version information @@ -29,6 +30,7 @@ type Version struct { Compiler string Platform string KubectlVersion string + ExtraBuildInfo string } func (v Version) String() string { @@ -66,6 +68,7 @@ func GetVersion() Version { versionStr += "+unknown" } } + return Version{ Version: versionStr, BuildDate: buildDate, @@ -76,5 +79,6 @@ func GetVersion() Version { Compiler: runtime.Compiler, Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), KubectlVersion: kubectlVersion, + ExtraBuildInfo: extraBuildInfo, } } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go index c235c0315c..8fd016ee36 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.go @@ -36,7 +36,11 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// ApplicationQuery is a query for application resources +// ApplicationQuery is a query for application resources. When getting multiple applications, the "projects" field acts +// as a filter. When getting a single application, you may specify either zero or one project. If you specify zero +// projects, the application will be returned regardless of which project it belongs to (assuming you have access). If +// you specify one project, the application will only be returned if it exists and belongs to the specified project. +// Otherwise you will receive a 404. 
type ApplicationQuery struct { // the application's name Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -211,6 +215,7 @@ type RevisionMetadataQuery struct { Revision *string `protobuf:"bytes,2,req,name=revision" json:"revision,omitempty"` // the application's namespace AppNamespace *string `protobuf:"bytes,3,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -270,6 +275,13 @@ func (m *RevisionMetadataQuery) GetAppNamespace() string { return "" } +func (m *RevisionMetadataQuery) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + // ApplicationEventsQuery is a query for application resource events type ApplicationResourceEventsQuery struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` @@ -277,6 +289,7 @@ type ApplicationResourceEventsQuery struct { ResourceName *string `protobuf:"bytes,3,opt,name=resourceName" json:"resourceName,omitempty"` ResourceUID *string `protobuf:"bytes,4,opt,name=resourceUID" json:"resourceUID,omitempty"` AppNamespace *string `protobuf:"bytes,5,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,6,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -350,11 +363,19 @@ func (m *ApplicationResourceEventsQuery) GetAppNamespace() string { return "" } +func (m *ApplicationResourceEventsQuery) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + // ManifestQuery is a query for manifest resources type ApplicationManifestQuery struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Revision *string `protobuf:"bytes,2,opt,name=revision" json:"revision,omitempty"` AppNamespace *string `protobuf:"bytes,3,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -414,6 +435,13 @@ func (m *ApplicationManifestQuery) GetAppNamespace() string { return "" } +func (m *ApplicationManifestQuery) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type FileChunk struct { Chunk []byte `protobuf:"bytes,1,req,name=chunk" json:"chunk,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -465,6 +493,7 @@ type ApplicationManifestQueryWithFiles struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Checksum *string `protobuf:"bytes,2,req,name=checksum" json:"checksum,omitempty"` AppNamespace *string `protobuf:"bytes,3,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -524,6 +553,13 @@ func (m *ApplicationManifestQueryWithFiles) GetAppNamespace() string { return "" } +func (m *ApplicationManifestQueryWithFiles) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationManifestQueryWithFilesWrapper struct { // Types that are valid to be assigned to Part: // 
*ApplicationManifestQueryWithFilesWrapper_Query @@ -721,6 +757,7 @@ func (m *ApplicationCreateRequest) GetValidate() bool { type ApplicationUpdateRequest struct { Application *v1alpha1.Application `protobuf:"bytes,1,req,name=application" json:"application,omitempty"` Validate *bool `protobuf:"varint,2,opt,name=validate" json:"validate,omitempty"` + Project *string `protobuf:"bytes,3,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -773,11 +810,19 @@ func (m *ApplicationUpdateRequest) GetValidate() bool { return false } +func (m *ApplicationUpdateRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationDeleteRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Cascade *bool `protobuf:"varint,2,opt,name=cascade" json:"cascade,omitempty"` PropagationPolicy *string `protobuf:"bytes,3,opt,name=propagationPolicy" json:"propagationPolicy,omitempty"` AppNamespace *string `protobuf:"bytes,4,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,5,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -844,6 +889,13 @@ func (m *ApplicationDeleteRequest) GetAppNamespace() string { return "" } +func (m *ApplicationDeleteRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type SyncOptions struct { Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -904,6 +956,7 @@ type ApplicationSyncRequest struct { RetryStrategy *v1alpha1.RetryStrategy `protobuf:"bytes,10,opt,name=retryStrategy" json:"retryStrategy,omitempty"` SyncOptions *SyncOptions `protobuf:"bytes,11,opt,name=syncOptions" json:"syncOptions,omitempty"` AppNamespace *string `protobuf:"bytes,12,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,13,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1019,12 +1072,20 @@ func (m *ApplicationSyncRequest) GetAppNamespace() string { return "" } +func (m *ApplicationSyncRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + // ApplicationUpdateSpecRequest is a request to update application spec type ApplicationUpdateSpecRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Spec *v1alpha1.ApplicationSpec `protobuf:"bytes,2,req,name=spec" json:"spec,omitempty"` Validate *bool `protobuf:"varint,3,opt,name=validate" json:"validate,omitempty"` AppNamespace *string `protobuf:"bytes,4,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,5,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1091,12 +1152,20 @@ func (m *ApplicationUpdateSpecRequest) GetAppNamespace() string { return "" } +func (m *ApplicationUpdateSpecRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + // ApplicationPatchRequest is a request to patch an application type ApplicationPatchRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Patch 
*string `protobuf:"bytes,2,req,name=patch" json:"patch,omitempty"` PatchType *string `protobuf:"bytes,3,req,name=patchType" json:"patchType,omitempty"` AppNamespace *string `protobuf:"bytes,5,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,6,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1163,12 +1232,20 @@ func (m *ApplicationPatchRequest) GetAppNamespace() string { return "" } +func (m *ApplicationPatchRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationRollbackRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` DryRun *bool `protobuf:"varint,3,opt,name=dryRun" json:"dryRun,omitempty"` Prune *bool `protobuf:"varint,4,opt,name=prune" json:"prune,omitempty"` AppNamespace *string `protobuf:"bytes,6,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,7,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1242,6 +1319,13 @@ func (m *ApplicationRollbackRequest) GetAppNamespace() string { return "" } +func (m *ApplicationRollbackRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationResourceRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Namespace *string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` @@ -1250,6 +1334,7 @@ type ApplicationResourceRequest struct { Group *string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"` Kind *string `protobuf:"bytes,6,req,name=kind" json:"kind,omitempty"` AppNamespace *string `protobuf:"bytes,7,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,8,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1337,6 +1422,13 @@ func (m *ApplicationResourceRequest) GetAppNamespace() string { return "" } +func (m *ApplicationResourceRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationResourcePatchRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Namespace *string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` @@ -1347,6 +1439,7 @@ type ApplicationResourcePatchRequest struct { Patch *string `protobuf:"bytes,7,req,name=patch" json:"patch,omitempty"` PatchType *string `protobuf:"bytes,8,req,name=patchType" json:"patchType,omitempty"` AppNamespace *string `protobuf:"bytes,9,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,10,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1448,6 +1541,13 @@ func (m *ApplicationResourcePatchRequest) GetAppNamespace() string { return "" } +func (m *ApplicationResourcePatchRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationResourceDeleteRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Namespace *string `protobuf:"bytes,2,opt,name=namespace" 
json:"namespace,omitempty"` @@ -1458,6 +1558,7 @@ type ApplicationResourceDeleteRequest struct { Force *bool `protobuf:"varint,7,opt,name=force" json:"force,omitempty"` Orphan *bool `protobuf:"varint,8,opt,name=orphan" json:"orphan,omitempty"` AppNamespace *string `protobuf:"bytes,9,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,10,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1559,6 +1660,13 @@ func (m *ApplicationResourceDeleteRequest) GetAppNamespace() string { return "" } +func (m *ApplicationResourceDeleteRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ResourceActionRunRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Namespace *string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` @@ -1568,6 +1676,7 @@ type ResourceActionRunRequest struct { Kind *string `protobuf:"bytes,6,req,name=kind" json:"kind,omitempty"` Action *string `protobuf:"bytes,7,req,name=action" json:"action,omitempty"` AppNamespace *string `protobuf:"bytes,8,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,9,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1662,6 +1771,13 @@ func (m *ResourceActionRunRequest) GetAppNamespace() string { return "" } +func (m *ResourceActionRunRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ResourceActionsListResponse struct { Actions []*v1alpha1.ResourceAction `protobuf:"bytes,1,rep,name=actions" json:"actions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1772,6 +1888,7 @@ type ApplicationPodLogsQuery struct { ResourceName *string `protobuf:"bytes,13,opt,name=resourceName" json:"resourceName,omitempty"` Previous *bool `protobuf:"varint,14,opt,name=previous" json:"previous,omitempty"` AppNamespace *string `protobuf:"bytes,15,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,16,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1915,6 +2032,13 @@ func (m *ApplicationPodLogsQuery) GetAppNamespace() string { return "" } +func (m *ApplicationPodLogsQuery) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type LogEntry struct { Content *string `protobuf:"bytes,1,req,name=content" json:"content,omitempty"` // deprecated in favor of timeStampStr since meta.v1.Time don't support nano time @@ -1998,6 +2122,7 @@ func (m *LogEntry) GetPodName() string { type OperationTerminateRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` AppNamespace *string `protobuf:"bytes,2,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,3,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2050,9 +2175,17 @@ func (m *OperationTerminateRequest) GetAppNamespace() string { return "" } +func (m *OperationTerminateRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationSyncWindowsQuery struct { Name *string 
`protobuf:"bytes,1,req,name=name" json:"name,omitempty"` AppNamespace *string `protobuf:"bytes,2,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,3,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2105,6 +2238,13 @@ func (m *ApplicationSyncWindowsQuery) GetAppNamespace() string { return "" } +func (m *ApplicationSyncWindowsQuery) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ApplicationSyncWindowsResponse struct { ActiveWindows []*ApplicationSyncWindow `protobuf:"bytes,1,rep,name=activeWindows" json:"activeWindows,omitempty"` AssignedWindows []*ApplicationSyncWindow `protobuf:"bytes,2,rep,name=assignedWindows" json:"assignedWindows,omitempty"` @@ -2286,6 +2426,7 @@ type ResourcesQuery struct { Group *string `protobuf:"bytes,5,opt,name=group" json:"group,omitempty"` Kind *string `protobuf:"bytes,6,opt,name=kind" json:"kind,omitempty"` AppNamespace *string `protobuf:"bytes,7,opt,name=appNamespace" json:"appNamespace,omitempty"` + Project *string `protobuf:"bytes,8,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2373,6 +2514,13 @@ func (m *ResourcesQuery) GetAppNamespace() string { return "" } +func (m *ResourcesQuery) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + type ManagedResourcesResponse struct { Items []*v1alpha1.ResourceDiff `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -2541,6 +2689,7 @@ func (m *LinksResponse) GetItems() []*LinkInfo { type ListAppLinksRequest struct { Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + Project *string `protobuf:"bytes,4,opt,name=project" json:"project,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2593,6 +2742,13 @@ func (m *ListAppLinksRequest) GetNamespace() string { return "" } +func (m *ListAppLinksRequest) GetProject() string { + if m != nil && m.Project != nil { + return *m.Project + } + return "" +} + func init() { proto.RegisterType((*ApplicationQuery)(nil), "application.ApplicationQuery") proto.RegisterType((*NodeQuery)(nil), "application.NodeQuery") @@ -2636,169 +2792,175 @@ func init() { } var fileDescriptor_df6e82b174b5eaec = []byte{ - // 2590 bytes of a gzipped FileDescriptorProto + // 2673 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x8f, 0x1c, 0x47, - 0x15, 0xa7, 0x66, 0xbf, 0x66, 0xde, 0xac, 0xbf, 0x2a, 0xf1, 0xd2, 0x69, 0xaf, 0xcd, 0xba, 0xfd, - 0xb5, 0x5e, 0x7b, 0x67, 0xec, 0xc1, 0x20, 0x67, 0x13, 0x04, 0xb6, 0xe3, 0x2f, 0x58, 0x3b, 0xa6, - 0xd7, 0xc6, 0x28, 0x1c, 0xa0, 0xd2, 0x53, 0x3b, 0xdb, 0x6c, 0x4f, 0x77, 0xbb, 0xbb, 0x67, 0xac, - 0x91, 0xf1, 0x25, 0x88, 0x13, 0x51, 0x90, 0x92, 0x1c, 0x50, 0x14, 0x21, 0x94, 0x28, 0x17, 0x2e, - 0xdc, 0x10, 0x12, 0x17, 0xb8, 0x20, 0x90, 0x38, 0x20, 0x3e, 0x2e, 0x9c, 0x90, 0xc5, 0x8d, 0x0b, - 0x07, 0xfe, 0x00, 0x54, 0xd5, 0x55, 0xdd, 0xd5, 0x33, 0x3d, 0x3d, 0xbd, 0xec, 0x46, 0xf1, 0xad, - 0x5e, 0x4d, 0xd5, 0x7b, 0xbf, 0x7a, 0xf5, 0xbe, 0xea, 0xf5, 0xc0, 0xc9, 0x90, 0x06, 0x7d, 0x1a, - 0x34, 0x89, 0xef, 0x3b, 0xb6, 0x45, 0x22, 0xdb, 
	[... remainder of the regenerated fileDescriptor_df6e82b174b5eaec byte array elided: gzipped FileDescriptorProto data, machine-generated and not human-reviewable ...]
0xcb, 0x7c, 0x93, 0x74, 0xfd, 0x8b, + 0x1c, 0xea, 0x72, 0xa3, 0x28, 0x10, 0xaf, 0xa3, 0x55, 0xdc, 0x81, 0xd9, 0xa4, 0x72, 0x36, 0x98, + 0x1e, 0xb9, 0xca, 0x9a, 0xbe, 0x5c, 0x70, 0x30, 0x48, 0x88, 0x2a, 0x72, 0xc0, 0xea, 0xb0, 0x1c, + 0x30, 0xcd, 0xc2, 0x34, 0x3e, 0x5e, 0x14, 0xc4, 0xff, 0x0f, 0x8e, 0x39, 0xcd, 0xd1, 0x9d, 0x34, + 0x96, 0x87, 0xe5, 0x01, 0xe6, 0x9d, 0xef, 0x21, 0x38, 0xd4, 0x7d, 0xb8, 0xc6, 0x47, 0xbb, 0x62, + 0xa6, 0x7a, 0xd7, 0xd0, 0xf3, 0x5e, 0x1c, 0x74, 0x30, 0x37, 0x3e, 0xcd, 0x51, 0xac, 0xe3, 0x0b, + 0x43, 0x77, 0xc6, 0x2d, 0x19, 0x75, 0x98, 0xa2, 0xb5, 0xec, 0x55, 0xeb, 0xd7, 0x08, 0xe6, 0xa5, + 0xde, 0x3b, 0x21, 0xa5, 0xc5, 0xb0, 0x26, 0xb7, 0x11, 0x98, 0x2d, 0xe3, 0x55, 0x0e, 0xff, 0x93, + 0xf8, 0xfc, 0x88, 0xf0, 0x25, 0xec, 0xb5, 0x98, 0x21, 0xfd, 0x03, 0x82, 0xc3, 0xf7, 0x12, 0xde, + 0x7f, 0x48, 0xf8, 0x37, 0x38, 0xfe, 0xd7, 0xf0, 0x2b, 0x05, 0xe7, 0xbc, 0x61, 0xd3, 0x38, 0x8b, + 0xf0, 0x2f, 0x11, 0x94, 0xe5, 0xab, 0x0a, 0x3e, 0x35, 0x70, 0x63, 0xe4, 0xdf, 0x5d, 0x26, 0x49, + 0x66, 0x71, 0xa8, 0x31, 0x4e, 0x14, 0xa6, 0x53, 0x61, 0x9f, 0x11, 0xfa, 0x1d, 0x04, 0x38, 0xbd, + 0x33, 0xa7, 0xb7, 0x68, 0xfc, 0x62, 0xce, 0xd4, 0xc0, 0xc2, 0x8c, 0x7e, 0x6a, 0x68, 0xbf, 0x7c, + 0x2a, 0x5d, 0x2d, 0x4c, 0xa5, 0x7e, 0x6a, 0xff, 0x9b, 0x08, 0xaa, 0xd7, 0x68, 0x7a, 0x07, 0x29, + 0xf0, 0x65, 0xfe, 0x51, 0x48, 0x5f, 0x19, 0xde, 0x51, 0x20, 0x3a, 0xc3, 0x11, 0xbd, 0x88, 0x8b, + 0x5d, 0x25, 0x01, 0xfc, 0x00, 0xc1, 0xc2, 0x6d, 0x95, 0xa2, 0xf8, 0xcc, 0x30, 0x4b, 0xb9, 0x48, + 0x3e, 0x3a, 0xae, 0x8f, 0x73, 0x5c, 0x6b, 0xc6, 0x48, 0xb8, 0xd6, 0xc5, 0xfb, 0xca, 0x0f, 0x51, + 0x72, 0x89, 0xed, 0xaa, 0x67, 0xff, 0xaf, 0x7e, 0x2b, 0x28, 0x8b, 0x1b, 0xe7, 0x39, 0xbe, 0x1a, + 0x3e, 0x33, 0x0a, 0xbe, 0xba, 0x28, 0x72, 0xe3, 0xef, 0x23, 0x38, 0xcc, 0xdf, 0x1a, 0x54, 0xc5, + 0x5d, 0x29, 0x66, 0xd0, 0xcb, 0xc4, 0x08, 0x29, 0x46, 0xc4, 0x1f, 0x63, 0x5f, 0xa0, 0xd6, 0xe5, + 0x3b, 0xc2, 0xb7, 0x10, 0x1c, 0x90, 0x49, 0x4d, 0xac, 0xee, 0xda, 0x30, 0xc7, 0xed, 0x37, 0x09, + 0x0a, 0xba, 0xad, 0x8e, 0x46, 0xb7, 0xf7, 0x10, 0xcc, 0x89, 0x6a, 0x7e, 0xc1, 0x51, 0x41, 0x29, + 0xf7, 0xeb, 0x5d, 0x35, 0x0e, 0x51, 0x0c, 0x36, 0xbe, 0xc8, 0xcd, 0xde, 0xc5, 0xf5, 0x22, 0xb3, + 0x81, 0x6f, 0x47, 0xf5, 0x87, 0xa2, 0x12, 0xfb, 0xa8, 0xee, 0xfa, 0xcd, 0xe8, 0x0d, 0x03, 0x17, + 0x26, 0x44, 0xd6, 0xe7, 0x2c, 0xc2, 0x31, 0x54, 0x18, 0x39, 0x78, 0xe1, 0x04, 0x2f, 0x77, 0x95, + 0x59, 0x7a, 0x6a, 0x2a, 0xba, 0xde, 0x53, 0x88, 0xc9, 0x32, 0xa0, 0xb8, 0xc6, 0xe2, 0x17, 0x0a, + 0xcd, 0x72, 0x43, 0x6f, 0x23, 0x38, 0xac, 0xb2, 0x3d, 0x31, 0x3f, 0x32, 0xd7, 0x8b, 0x50, 0x88, + 0x43, 0x35, 0x5e, 0x1d, 0x89, 0x48, 0x1c, 0xce, 0xa5, 0xab, 0x7f, 0x7c, 0x72, 0x0c, 0xfd, 0xe5, + 0xc9, 0x31, 0xf4, 0x8f, 0x27, 0xc7, 0xd0, 0x1b, 0x17, 0x46, 0xfb, 0x4f, 0xad, 0xe5, 0x3a, 0xd4, + 0x8b, 0x55, 0xf5, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x30, 0xc0, 0x40, 0x7a, 0x39, 0x2c, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
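The regenerated byte blob above is the gzip-compressed FileDescriptorProto for application.proto; the functional part of this vendor bump starts in the hunks that follow: a new RevisionChartDetails RPC on ApplicationServiceClient/ApplicationServiceServer (returning *v1alpha1.ChartDetails for a given revision) and an optional Project field threaded through most request messages. As a rough illustration of how the new RPC is meant to be consumed — not code from this patch — the sketch below assumes the standard generated constructor NewApplicationServiceClient, an already-established and authenticated gRPC connection, and placeholder application/revision names; only the method signature and the RevisionMetadataQuery fields are taken from the generated code in this diff.

    // Illustrative sketch only: calling the new RevisionChartDetails RPC.
    // Connection setup and auth are assumptions; the method and query fields
    // come from the generated client above.
    package main

    import (
        "context"
        "fmt"
        "log"

        application "github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
        "google.golang.org/grpc"
    )

    func strPtr(s string) *string { return &s }

    func main() {
        // Assumption: an authenticated connection to the Argo CD API server.
        conn, err := grpc.Dial("argocd-server:443", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := application.NewApplicationServiceClient(conn)
        q := &application.RevisionMetadataQuery{
            Name:     strPtr("my-app"),
            Revision: strPtr("1.2.3"),
            Project:  strPtr("default"), // optional field added in this patch
        }
        details, err := client.RevisionChartDetails(context.Background(), q)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("chart details: %+v\n", details)
    }

The gateway hunks further down also expose the same RPC over REST as GET /api/v1/applications/{name}/revisions/{revision}/chartdetails, and the new Project pointer gives callers a way to pin a query to a project (its server-side use is outside this diff).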
@@ -2827,6 +2989,8 @@ type ApplicationServiceClient interface { GetApplicationSyncWindows(ctx context.Context, in *ApplicationSyncWindowsQuery, opts ...grpc.CallOption) (*ApplicationSyncWindowsResponse, error) // Get the meta-data (author, date, tags, message) for a specific revision of the application RevisionMetadata(ctx context.Context, in *RevisionMetadataQuery, opts ...grpc.CallOption) (*v1alpha1.RevisionMetadata, error) + // Get the chart metadata (description, maintainers, home) for a specific revision of the application + RevisionChartDetails(ctx context.Context, in *RevisionMetadataQuery, opts ...grpc.CallOption) (*v1alpha1.ChartDetails, error) // GetManifests returns application manifests GetManifests(ctx context.Context, in *ApplicationManifestQuery, opts ...grpc.CallOption) (*apiclient.ManifestResponse, error) // GetManifestsWithFiles returns application manifests using provided files to generate them @@ -2963,6 +3127,15 @@ func (c *applicationServiceClient) RevisionMetadata(ctx context.Context, in *Rev return out, nil } +func (c *applicationServiceClient) RevisionChartDetails(ctx context.Context, in *RevisionMetadataQuery, opts ...grpc.CallOption) (*v1alpha1.ChartDetails, error) { + out := new(v1alpha1.ChartDetails) + err := c.cc.Invoke(ctx, "/application.ApplicationService/RevisionChartDetails", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *applicationServiceClient) GetManifests(ctx context.Context, in *ApplicationManifestQuery, opts ...grpc.CallOption) (*apiclient.ManifestResponse, error) { out := new(apiclient.ManifestResponse) err := c.cc.Invoke(ctx, "/application.ApplicationService/GetManifests", in, out, opts...) @@ -3230,6 +3403,8 @@ type ApplicationServiceServer interface { GetApplicationSyncWindows(context.Context, *ApplicationSyncWindowsQuery) (*ApplicationSyncWindowsResponse, error) // Get the meta-data (author, date, tags, message) for a specific revision of the application RevisionMetadata(context.Context, *RevisionMetadataQuery) (*v1alpha1.RevisionMetadata, error) + // Get the chart metadata (description, maintainers, home) for a specific revision of the application + RevisionChartDetails(context.Context, *RevisionMetadataQuery) (*v1alpha1.ChartDetails, error) // GetManifests returns application manifests GetManifests(context.Context, *ApplicationManifestQuery) (*apiclient.ManifestResponse, error) // GetManifestsWithFiles returns application manifests using provided files to generate them @@ -3297,6 +3472,9 @@ func (*UnimplementedApplicationServiceServer) GetApplicationSyncWindows(ctx cont func (*UnimplementedApplicationServiceServer) RevisionMetadata(ctx context.Context, req *RevisionMetadataQuery) (*v1alpha1.RevisionMetadata, error) { return nil, status.Errorf(codes.Unimplemented, "method RevisionMetadata not implemented") } +func (*UnimplementedApplicationServiceServer) RevisionChartDetails(ctx context.Context, req *RevisionMetadataQuery) (*v1alpha1.ChartDetails, error) { + return nil, status.Errorf(codes.Unimplemented, "method RevisionChartDetails not implemented") +} func (*UnimplementedApplicationServiceServer) GetManifests(ctx context.Context, req *ApplicationManifestQuery) (*apiclient.ManifestResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetManifests not implemented") } @@ -3491,6 +3669,24 @@ func _ApplicationService_RevisionMetadata_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _ApplicationService_RevisionChartDetails_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevisionMetadataQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApplicationServiceServer).RevisionChartDetails(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/application.ApplicationService/RevisionChartDetails", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApplicationServiceServer).RevisionChartDetails(ctx, req.(*RevisionMetadataQuery)) + } + return interceptor(ctx, in, info, handler) +} + func _ApplicationService_GetManifests_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ApplicationManifestQuery) if err := dec(in); err != nil { @@ -3893,6 +4089,10 @@ var _ApplicationService_serviceDesc = grpc.ServiceDesc{ MethodName: "RevisionMetadata", Handler: _ApplicationService_RevisionMetadata_Handler, }, + { + MethodName: "RevisionChartDetails", + Handler: _ApplicationService_RevisionChartDetails_Handler, + }, { MethodName: "GetManifests", Handler: _ApplicationService_GetManifests_Handler, @@ -4139,6 +4339,13 @@ func (m *RevisionMetadataQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x22 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4191,6 +4398,13 @@ func (m *ApplicationResourceEventsQuery) MarshalToSizedBuffer(dAtA []byte) (int, i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x32 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4255,6 +4469,13 @@ func (m *ApplicationManifestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x22 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4341,6 +4562,13 @@ func (m *ApplicationManifestQueryWithFiles) MarshalToSizedBuffer(dAtA []byte) (i i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x22 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4559,6 +4787,13 @@ func (m *ApplicationUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x1a + } if m.Validate != nil { i-- if *m.Validate { @@ -4610,6 +4845,13 @@ func (m *ApplicationDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= 
len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x2a + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4706,6 +4948,13 @@ func (m *ApplicationSyncRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x6a + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4849,6 +5098,13 @@ func (m *ApplicationUpdateSpecRequest) MarshalToSizedBuffer(dAtA []byte) (int, e i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x2a + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4916,6 +5172,13 @@ func (m *ApplicationPatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x32 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -4977,6 +5240,13 @@ func (m *ApplicationRollbackRequest) MarshalToSizedBuffer(dAtA []byte) (int, err i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x3a + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5047,6 +5317,13 @@ func (m *ApplicationResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, err i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x42 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5131,6 +5408,13 @@ func (m *ApplicationResourcePatchRequest) MarshalToSizedBuffer(dAtA []byte) (int i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x52 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5233,6 +5517,13 @@ func (m *ApplicationResourceDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (in i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x52 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5337,6 +5628,13 @@ func (m *ResourceActionRunRequest) MarshalToSizedBuffer(dAtA []byte) (int, error i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x4a + } if m.AppNamespace != nil { i -= 
len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5507,6 +5805,15 @@ func (m *ApplicationPodLogsQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5728,6 +6035,13 @@ func (m *OperationTerminateRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x1a + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5771,6 +6085,13 @@ func (m *ApplicationSyncWindowsQuery) MarshalToSizedBuffer(dAtA []byte) (int, er i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x1a + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -5974,6 +6295,13 @@ func (m *ResourcesQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x42 + } if m.AppNamespace != nil { i -= len(*m.AppNamespace) copy(dAtA[i:], *m.AppNamespace) @@ -6193,6 +6521,13 @@ func (m *ListAppLinksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Project != nil { + i -= len(*m.Project) + copy(dAtA[i:], *m.Project) + i = encodeVarintApplication(dAtA, i, uint64(len(*m.Project))) + i-- + dAtA[i] = 0x22 + } if m.Namespace != nil { i -= len(*m.Namespace) copy(dAtA[i:], *m.Namespace) @@ -6309,6 +6644,10 @@ func (m *RevisionMetadataQuery) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6341,6 +6680,10 @@ func (m *ApplicationResourceEventsQuery) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6365,6 +6708,10 @@ func (m *ApplicationManifestQuery) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6405,6 +6752,10 @@ func (m *ApplicationManifestQueryWithFiles) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6497,6 +6848,10 @@ func (m *ApplicationUpdateRequest) Size() (n int) { if m.Validate != nil { n += 2 } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } 
if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6524,6 +6879,10 @@ func (m *ApplicationDeleteRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6602,6 +6961,10 @@ func (m *ApplicationSyncRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6629,6 +6992,10 @@ func (m *ApplicationUpdateSpecRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6657,6 +7024,10 @@ func (m *ApplicationPatchRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6686,6 +7057,10 @@ func (m *ApplicationRollbackRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6726,6 +7101,10 @@ func (m *ApplicationResourceRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6774,6 +7153,10 @@ func (m *ApplicationResourcePatchRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6820,6 +7203,10 @@ func (m *ApplicationResourceDeleteRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6864,6 +7251,10 @@ func (m *ResourceActionRunRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6966,6 +7357,10 @@ func (m *ApplicationPodLogsQuery) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 2 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -7017,6 +7412,10 @@ func (m *OperationTerminateRequest) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -7037,6 +7436,10 @@ func (m *ApplicationSyncWindowsQuery) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if 
m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -7143,6 +7546,10 @@ func (m *ResourcesQuery) Size() (n int) { l = len(*m.AppNamespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -7227,6 +7634,10 @@ func (m *ListAppLinksRequest) Size() (n int) { l = len(*m.Namespace) n += 1 + l + sovApplication(uint64(l)) } + if m.Project != nil { + l = len(*m.Project) + n += 1 + l + sovApplication(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -7800,6 +8211,39 @@ func (m *RevisionMetadataQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -8024,17 +8468,50 @@ func (m *ApplicationResourceEventsQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApplication(dAtA[iNdEx:]) - if err != nil { - return err + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthApplication } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApplication(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApplication + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy @@ -8179,6 +8656,39 @@ func (m *ApplicationManifestQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -8425,6 +8935,39 @@ func (m *ApplicationManifestQueryWithFiles) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -8847,6 +9390,39 @@ func (m *ApplicationUpdateRequest) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Validate = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -9023,6 +9599,39 @@ func (m *ApplicationDeleteRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -9511,6 +10120,39 @@ func (m *ApplicationSyncRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -9691,6 +10333,39 @@ func (m *ApplicationUpdateSpecRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -9884,6 +10559,39 @@ func (m *ApplicationPatchRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -10075,6 +10783,39 @@ func (m *ApplicationRollbackRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -10366,7 +11107,40 @@ func (m *ApplicationResourceRequest) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.AppNamespace = &s + m.AppNamespace = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s iNdEx = postIndex default: iNdEx = preIndex @@ -10735,6 +11509,39 @@ func (m *ApplicationResourcePatchRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -11082,6 +11889,39 @@ func (m *ApplicationResourceDeleteRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -11415,6 +12255,39 @@ func (m *ResourceActionRunRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -12105,6 +12978,39 @@ func (m *ApplicationPodLogsQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -12455,6 +13361,39 @@ func (m *OperationTerminateRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -12577,6 +13516,39 @@ func (m *ApplicationSyncWindowsQuery) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -13248,6 +14220,39 @@ func (m *ResourcesQuery) 
Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.AppNamespace = &s iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) @@ -13732,6 +14737,39 @@ func (m *ListAppLinksRequest) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Namespace = &s iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApplication + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApplication + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApplication + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Project = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApplication(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.gw.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.gw.go index 80c86e4fc9..ed6064cadb 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.gw.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/application/application.pb.gw.go @@ -459,6 +459,100 @@ func local_request_ApplicationService_RevisionMetadata_0(ctx context.Context, ma } +var ( + filter_ApplicationService_RevisionChartDetails_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0, "revision": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_ApplicationService_RevisionChartDetails_0(ctx context.Context, marshaler runtime.Marshaler, client ApplicationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RevisionMetadataQuery + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.StringP(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + val, ok = pathParams["revision"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision") + } + + protoReq.Revision, err = runtime.StringP(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", 
"revision", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ApplicationService_RevisionChartDetails_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RevisionChartDetails(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_ApplicationService_RevisionChartDetails_0(ctx context.Context, marshaler runtime.Marshaler, server ApplicationServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RevisionMetadataQuery + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.StringP(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + val, ok = pathParams["revision"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision") + } + + protoReq.Revision, err = runtime.StringP(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ApplicationService_RevisionChartDetails_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.RevisionChartDetails(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_ApplicationService_GetManifests_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) @@ -2085,6 +2179,29 @@ func RegisterApplicationServiceHandlerServer(ctx context.Context, mux *runtime.S }) + mux.Handle("GET", pattern_ApplicationService_RevisionChartDetails_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ApplicationService_RevisionChartDetails_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ApplicationService_RevisionChartDetails_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_ApplicationService_GetManifests_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -2685,6 +2802,26 @@ func RegisterApplicationServiceHandlerClient(ctx context.Context, mux *runtime.S }) + mux.Handle("GET", pattern_ApplicationService_RevisionChartDetails_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ApplicationService_RevisionChartDetails_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_ApplicationService_RevisionChartDetails_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_ApplicationService_GetManifests_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -3123,6 +3260,8 @@ var ( pattern_ApplicationService_RevisionMetadata_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "applications", "name", "revisions", "revision", "metadata"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_ApplicationService_RevisionChartDetails_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "applications", "name", "revisions", "revision", "chartdetails"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_ApplicationService_GetManifests_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "applications", "name", "manifests"}, "", runtime.AssumeColonVerbOpt(true))) pattern_ApplicationService_GetManifestsWithFiles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "applications", "manifestsWithFiles"}, "", runtime.AssumeColonVerbOpt(true))) @@ -3181,6 +3320,8 @@ var ( forward_ApplicationService_RevisionMetadata_0 = runtime.ForwardResponseMessage + forward_ApplicationService_RevisionChartDetails_0 = runtime.ForwardResponseMessage + forward_ApplicationService_GetManifests_0 = runtime.ForwardResponseMessage forward_ApplicationService_GetManifestsWithFiles_0 = runtime.ForwardResponseMessage diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/version/version.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/version/version.pb.go index 35474d6312..0b58bf4a6c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/version/version.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apiclient/version/version.pb.go @@ -46,6 +46,7 @@ type VersionMessage struct { HelmVersion string `protobuf:"bytes,11,opt,name=HelmVersion,proto3" json:"HelmVersion,omitempty"` KubectlVersion string `protobuf:"bytes,12,opt,name=KubectlVersion,proto3" json:"KubectlVersion,omitempty"` JsonnetVersion string `protobuf:"bytes,13,opt,name=JsonnetVersion,proto3" json:"JsonnetVersion,omitempty"` + ExtraBuildInfo string 
`protobuf:"bytes,14,opt,name=ExtraBuildInfo,proto3" json:"ExtraBuildInfo,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -168,6 +169,13 @@ func (m *VersionMessage) GetJsonnetVersion() string { return "" } +func (m *VersionMessage) GetExtraBuildInfo() string { + if m != nil { + return m.ExtraBuildInfo + } + return "" +} + func init() { proto.RegisterType((*VersionMessage)(nil), "version.VersionMessage") } @@ -175,32 +183,33 @@ func init() { func init() { proto.RegisterFile("server/version/version.proto", fileDescriptor_8be80977d07a4107) } var fileDescriptor_8be80977d07a4107 = []byte{ - // 399 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xdf, 0x6a, 0xdb, 0x30, - 0x18, 0xc5, 0x71, 0xb2, 0xe5, 0x8f, 0x92, 0x85, 0x21, 0x46, 0x66, 0xbc, 0x10, 0x42, 0x2e, 0xc6, - 0x18, 0xcc, 0x86, 0x6c, 0x4f, 0x90, 0x6c, 0x64, 0x2c, 0x0c, 0xc2, 0x32, 0x7a, 0xd1, 0x3b, 0xd9, - 0xf9, 0xe2, 0xaa, 0xb5, 0xfc, 0x19, 0x59, 0x36, 0xb4, 0x97, 0x7d, 0x85, 0x42, 0x9f, 0xa9, 0x97, - 0x85, 0xbe, 0x40, 0x09, 0x7d, 0x90, 0x62, 0xd9, 0x72, 0x9b, 0xf6, 0xca, 0x3a, 0xe7, 0xfc, 0x7c, - 0x10, 0x1c, 0x91, 0x51, 0x0a, 0x32, 0x07, 0xe9, 0xe5, 0x20, 0x53, 0x8e, 0xb1, 0xf9, 0xba, 0x89, - 0x44, 0x85, 0xb4, 0x5d, 0x49, 0x67, 0x14, 0x22, 0x86, 0x11, 0x78, 0x2c, 0xe1, 0x1e, 0x8b, 0x63, - 0x54, 0x4c, 0x71, 0x8c, 0xd3, 0x12, 0x73, 0x3e, 0x55, 0xa9, 0x56, 0x7e, 0xb6, 0xf3, 0x40, 0x24, - 0xea, 0xbc, 0x0c, 0xa7, 0xd7, 0x4d, 0x32, 0x38, 0x2a, 0x6b, 0xfe, 0x42, 0x9a, 0xb2, 0x10, 0xa8, - 0x4d, 0xda, 0x95, 0x63, 0x5b, 0x13, 0xeb, 0x4b, 0xf7, 0x9f, 0x91, 0x74, 0x44, 0xba, 0xf3, 0x8c, - 0x47, 0xdb, 0x9f, 0x4c, 0x81, 0xdd, 0xd0, 0xd9, 0x93, 0x51, 0xa4, 0x4b, 0xae, 0x16, 0x28, 0x04, - 0x57, 0x76, 0xb3, 0x4c, 0x6b, 0x83, 0x0e, 0x49, 0x6b, 0xc9, 0xd5, 0x7f, 0x16, 0xda, 0x6f, 0x74, - 0x54, 0x29, 0x3a, 0x25, 0xfd, 0xe2, 0x24, 0x01, 0x36, 0xaa, 0xa8, 0x7d, 0xab, 0xd3, 0x03, 0x4f, - 0x37, 0xa3, 0xb9, 0x53, 0xab, 0x6a, 0x36, 0x06, 0x75, 0x48, 0x67, 0x81, 0x22, 0xe1, 0x11, 0x48, - 0xbb, 0xad, 0xc3, 0x5a, 0x17, 0xd9, 0x3a, 0x62, 0x6a, 0x87, 0x52, 0xd8, 0x9d, 0x32, 0x33, 0x9a, - 0x7e, 0x25, 0xef, 0x57, 0x59, 0xaa, 0x50, 0xf0, 0x0b, 0x30, 0xe5, 0x44, 0x33, 0xaf, 0x7c, 0x3a, - 0x21, 0xbd, 0xdf, 0x10, 0x09, 0x83, 0xf5, 0x34, 0xf6, 0xdc, 0xa2, 0x9f, 0xc9, 0x60, 0x95, 0xf9, - 0x10, 0xa8, 0xc8, 0x40, 0x7d, 0x0d, 0xbd, 0x70, 0x0b, 0xee, 0x4f, 0x8a, 0x71, 0x0c, 0xca, 0x70, - 0xef, 0x4a, 0xee, 0xd0, 0x9d, 0xf9, 0xf5, 0x2e, 0x1b, 0x90, 0x39, 0x0f, 0x80, 0xae, 0xeb, 0x5d, - 0xe8, 0xd0, 0x2d, 0x37, 0x75, 0xcd, 0xa6, 0xee, 0xaf, 0x62, 0x53, 0xe7, 0xa3, 0x6b, 0x5e, 0xc8, - 0xe1, 0xa6, 0xd3, 0x0f, 0x97, 0x77, 0x0f, 0x57, 0x8d, 0x01, 0xed, 0xeb, 0x37, 0x52, 0x41, 0xf3, - 0xf9, 0xcd, 0x7e, 0x6c, 0xdd, 0xee, 0xc7, 0xd6, 0xfd, 0x7e, 0x6c, 0x1d, 0xff, 0x08, 0xb9, 0x3a, - 0xc9, 0x7c, 0x37, 0x40, 0xe1, 0x31, 0x19, 0x62, 0x22, 0xf1, 0x54, 0x1f, 0xbe, 0x05, 0x5b, 0x2f, - 0x9f, 0x79, 0xc9, 0x59, 0x58, 0xfc, 0x1d, 0x44, 0x1c, 0x62, 0x65, 0x3a, 0xfc, 0x96, 0xbe, 0xc2, - 0xf7, 0xc7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x97, 0xec, 0x06, 0xab, 0x02, 0x00, 0x00, + // 415 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xd1, 0x6a, 0x14, 0x31, + 0x18, 0x85, 0x99, 0x56, 0x77, 0xdb, 0x74, 0x5d, 0x24, 0x48, 0x0d, 0xeb, 0xb2, 0x94, 0xbd, 0x10, + 0x11, 0x9c, 0x81, 0xea, 0x13, 0x6c, 0x2d, 0x55, 0x8b, 0x50, 0xac, 0x78, 0xe1, 0x5d, 0x66, 0xfa, + 0xef, 0x18, 0x9d, 0xe4, 0x1f, 0x32, 0xff, 0x2c, 0xea, 
0xa5, 0xaf, 0xe0, 0xfb, 0x78, 0xed, 0xa5, + 0xe0, 0x0b, 0xc8, 0xe2, 0x83, 0x48, 0x92, 0xc9, 0xe8, 0xd8, 0xab, 0xcd, 0x39, 0xe7, 0xdb, 0x43, + 0x98, 0x13, 0x36, 0x6f, 0xc0, 0x6e, 0xc0, 0x66, 0x1b, 0xb0, 0x8d, 0x42, 0x13, 0x7f, 0xd3, 0xda, + 0x22, 0x21, 0x1f, 0x77, 0x72, 0x36, 0x2f, 0x11, 0xcb, 0x0a, 0x32, 0x59, 0xab, 0x4c, 0x1a, 0x83, + 0x24, 0x49, 0xa1, 0x69, 0x02, 0x36, 0xbb, 0xd7, 0xa5, 0x5e, 0xe5, 0xed, 0x3a, 0x03, 0x5d, 0xd3, + 0xa7, 0x10, 0x2e, 0xbf, 0xed, 0xb2, 0xe9, 0x9b, 0x50, 0xf3, 0x12, 0x9a, 0x46, 0x96, 0xc0, 0x05, + 0x1b, 0x77, 0x8e, 0x48, 0x8e, 0x92, 0x07, 0xfb, 0xaf, 0xa2, 0xe4, 0x73, 0xb6, 0xbf, 0x6a, 0x55, + 0x75, 0xf5, 0x54, 0x12, 0x88, 0x1d, 0x9f, 0xfd, 0x35, 0x5c, 0x7a, 0xa6, 0xe8, 0x04, 0xb5, 0x56, + 0x24, 0x76, 0x43, 0xda, 0x1b, 0xfc, 0x90, 0x8d, 0xce, 0x14, 0xbd, 0x96, 0xa5, 0xb8, 0xe1, 0xa3, + 0x4e, 0xf1, 0x25, 0x9b, 0xb8, 0x93, 0x05, 0xb8, 0x24, 0x57, 0x7b, 0xd3, 0xa7, 0x03, 0xcf, 0x37, + 0x63, 0xbc, 0xd3, 0xa8, 0x6b, 0x8e, 0x06, 0x9f, 0xb1, 0xbd, 0x13, 0xd4, 0xb5, 0xaa, 0xc0, 0x8a, + 0xb1, 0x0f, 0x7b, 0xed, 0xb2, 0x8b, 0x4a, 0xd2, 0x1a, 0xad, 0x16, 0x7b, 0x21, 0x8b, 0x9a, 0x3f, + 0x64, 0xb7, 0xcf, 0xdb, 0x86, 0x50, 0xab, 0xcf, 0x10, 0xcb, 0x99, 0x67, 0xae, 0xf9, 0xfc, 0x88, + 0x1d, 0x3c, 0x83, 0x4a, 0x47, 0xec, 0xc0, 0x63, 0xff, 0x5a, 0xfc, 0x3e, 0x9b, 0x9e, 0xb7, 0x39, + 0x14, 0x54, 0x45, 0x68, 0xe2, 0xa1, 0xff, 0x5c, 0xc7, 0xbd, 0x68, 0xd0, 0x18, 0xa0, 0xc8, 0xdd, + 0x0a, 0xdc, 0xd0, 0x75, 0xdc, 0xe9, 0x47, 0xb2, 0xd2, 0x7f, 0xdf, 0xe7, 0x66, 0x8d, 0x62, 0x1a, + 0xb8, 0xa1, 0x7b, 0x9c, 0xf7, 0xfb, 0x5d, 0x82, 0xdd, 0xa8, 0x02, 0xf8, 0x45, 0xbf, 0x1f, 0x3f, + 0x4c, 0xc3, 0xf6, 0x69, 0xdc, 0x3e, 0x3d, 0x75, 0xdb, 0xcf, 0xee, 0xa6, 0xf1, 0x25, 0x0d, 0xb7, + 0x5f, 0xde, 0xf9, 0xf2, 0xf3, 0xf7, 0xd7, 0x9d, 0x29, 0x9f, 0xf8, 0xb7, 0xd4, 0x41, 0xab, 0xd5, + 0xf7, 0xed, 0x22, 0xf9, 0xb1, 0x5d, 0x24, 0xbf, 0xb6, 0x8b, 0xe4, 0xed, 0x93, 0x52, 0xd1, 0xbb, + 0x36, 0x4f, 0x0b, 0xd4, 0x99, 0xb4, 0x25, 0xd6, 0x16, 0xdf, 0xfb, 0xc3, 0xa3, 0xe2, 0x2a, 0xdb, + 0x1c, 0x67, 0xf5, 0x87, 0xd2, 0xfd, 0xbb, 0xa8, 0x14, 0x18, 0x8a, 0x1d, 0xf9, 0xc8, 0x5f, 0xe1, + 0xf1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x21, 0xc0, 0xd1, 0xd3, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
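The MarshalToSizedBuffer and Unmarshal hunks below hard-code the byte 0x72 and a `case 14` branch for the new ExtraBuildInfo field. That value follows from the protobuf wire format: every field is prefixed with a key equal to (field_number << 3) | wire_type, and ExtraBuildInfo is field 14 with wire type 2 (length-delimited), so (14 << 3) | 2 = 0x72. A minimal standalone sketch of that calculation, for illustration only (the helper name protoKey is invented here and is not part of the generated file):

package main

import "fmt"

// protoKey computes the protobuf field key: the field number shifted left by
// three bits, OR'd with the wire type.
func protoKey(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	const (
		extraBuildInfoField = 14 // field number used by the hunks below
		wireTypeLenDelim    = 2  // wire type for strings and bytes
	)
	// Prints 0x72 — the tag byte written in MarshalToSizedBuffer and
	// matched by the `case 14` / `wireType != 2` checks in Unmarshal.
	fmt.Printf("0x%x\n", protoKey(extraBuildInfoField, wireTypeLenDelim))
}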
@@ -309,6 +318,13 @@ func (m *VersionMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.ExtraBuildInfo) > 0 { + i -= len(m.ExtraBuildInfo) + copy(dAtA[i:], m.ExtraBuildInfo) + i = encodeVarintVersion(dAtA, i, uint64(len(m.ExtraBuildInfo))) + i-- + dAtA[i] = 0x72 + } if len(m.JsonnetVersion) > 0 { i -= len(m.JsonnetVersion) copy(dAtA[i:], m.JsonnetVersion) @@ -461,6 +477,10 @@ func (m *VersionMessage) Size() (n int) { if l > 0 { n += 1 + l + sovVersion(uint64(l)) } + l = len(m.ExtraBuildInfo) + if l > 0 { + n += 1 + l + sovVersion(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -886,6 +906,38 @@ func (m *VersionMessage) Unmarshal(dAtA []byte) error { } m.JsonnetVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraBuildInfo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVersion + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVersion + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthVersion + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtraBuildInfo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipVersion(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go index 508fc226fe..19edd05646 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go @@ -22,6 +22,7 @@ import ( "sort" "github.com/argoproj/argo-cd/v2/common" + "github.com/argoproj/argo-cd/v2/util/security" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,17 +49,25 @@ type ApplicationSet struct { } // RBACName formats fully qualified application name for RBAC check. -func (a *ApplicationSet) RBACName() string { - return fmt.Sprintf("%s/%s", a.Spec.Template.Spec.GetProject(), a.ObjectMeta.Name) +func (a *ApplicationSet) RBACName(defaultNS string) string { + return security.RBACName(defaultNS, a.Spec.Template.Spec.GetProject(), a.Namespace, a.Name) } // ApplicationSetSpec represents a class of application set state. 
type ApplicationSetSpec struct { - GoTemplate bool `json:"goTemplate,omitempty" protobuf:"bytes,1,name=goTemplate"` - Generators []ApplicationSetGenerator `json:"generators" protobuf:"bytes,2,name=generators"` - Template ApplicationSetTemplate `json:"template" protobuf:"bytes,3,name=template"` - SyncPolicy *ApplicationSetSyncPolicy `json:"syncPolicy,omitempty" protobuf:"bytes,4,name=syncPolicy"` - Strategy *ApplicationSetStrategy `json:"strategy,omitempty" protobuf:"bytes,5,opt,name=strategy"` + GoTemplate bool `json:"goTemplate,omitempty" protobuf:"bytes,1,name=goTemplate"` + Generators []ApplicationSetGenerator `json:"generators" protobuf:"bytes,2,name=generators"` + Template ApplicationSetTemplate `json:"template" protobuf:"bytes,3,name=template"` + SyncPolicy *ApplicationSetSyncPolicy `json:"syncPolicy,omitempty" protobuf:"bytes,4,name=syncPolicy"` + Strategy *ApplicationSetStrategy `json:"strategy,omitempty" protobuf:"bytes,5,opt,name=strategy"` + PreservedFields *ApplicationPreservedFields `json:"preservedFields,omitempty" protobuf:"bytes,6,opt,name=preservedFields"` + GoTemplateOptions []string `json:"goTemplateOptions,omitempty" protobuf:"bytes,7,opt,name=goTemplateOptions"` + // ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators + ApplyNestedSelectors bool `json:"applyNestedSelectors,omitempty" protobuf:"bytes,8,name=applyNestedSelectors"` +} + +type ApplicationPreservedFields struct { + Annotations []string `json:"annotations,omitempty" protobuf:"bytes,1,name=annotations"` } // ApplicationSetStrategy configures how generated Applications are updated in sequence. @@ -82,11 +91,39 @@ type ApplicationMatchExpression struct { Values []string `json:"values,omitempty" protobuf:"bytes,3,opt,name=values"` } +// ApplicationsSyncPolicy representation +// "create-only" means applications are only created. If the generator's result contains update, applications won't be updated +// "create-update" means applications are only created/Updated. If the generator's result contains update, applications will be updated, but not deleted +// "create-delete" means applications are only created/deleted. If the generator's result contains update, applications won't be updated, if it results in deleted applications, the applications will be deleted +// "sync" means create/update/deleted. If the generator's result contains update, applications will be updated, if it results in deleted applications, the applications will be deleted +// If no ApplicationsSyncPolicy is defined, it defaults it to sync +type ApplicationsSyncPolicy string + +// sync / create-only / create-update / create-delete +const ( + ApplicationsSyncPolicyCreateOnly ApplicationsSyncPolicy = "create-only" + ApplicationsSyncPolicyCreateUpdate ApplicationsSyncPolicy = "create-update" + ApplicationsSyncPolicyCreateDelete ApplicationsSyncPolicy = "create-delete" + ApplicationsSyncPolicySync ApplicationsSyncPolicy = "sync" +) + +func (s ApplicationsSyncPolicy) AllowUpdate() bool { + return s == ApplicationsSyncPolicyCreateUpdate || s == ApplicationsSyncPolicySync +} + +func (s ApplicationsSyncPolicy) AllowDelete() bool { + return s == ApplicationsSyncPolicySync || s == ApplicationsSyncPolicyCreateDelete +} + // ApplicationSetSyncPolicy configures how generated Applications will relate to their // ApplicationSet. type ApplicationSetSyncPolicy struct { // PreserveResourcesOnDeletion will preserve resources on deletion. 
If PreserveResourcesOnDeletion is set to true, these Applications will not be deleted. PreserveResourcesOnDeletion bool `json:"preserveResourcesOnDeletion,omitempty" protobuf:"bytes,1,name=syncPolicy"` + // ApplicationsSync represents the policy applied on the generated applications. Possible values are create-only, create-update, create-delete, sync + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=create-only;create-update;create-delete;sync + ApplicationsSync *ApplicationsSyncPolicy `json:"applicationsSync,omitempty" protobuf:"bytes,2,opt,name=applicationsSync,casttype=ApplicationsSyncPolicy"` } // ApplicationSetTemplate represents argocd ApplicationSpec @@ -118,6 +155,8 @@ type ApplicationSetGenerator struct { // Selector allows to post-filter all generator. Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,9,name=selector"` + + Plugin *PluginGenerator `json:"plugin,omitempty" protobuf:"bytes,10,name=plugin"` } // ApplicationSetNestedGenerator represents a generator nested within a combination-type generator (MatrixGenerator or @@ -138,6 +177,8 @@ type ApplicationSetNestedGenerator struct { // Selector allows to post-filter all generator. Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,9,name=selector"` + + Plugin *PluginGenerator `json:"plugin,omitempty" protobuf:"bytes,10,name=plugin"` } type ApplicationSetNestedGenerators []ApplicationSetNestedGenerator @@ -153,6 +194,10 @@ type ApplicationSetTerminalGenerator struct { SCMProvider *SCMProviderGenerator `json:"scmProvider,omitempty" protobuf:"bytes,4,name=scmProvider"` ClusterDecisionResource *DuckTypeGenerator `json:"clusterDecisionResource,omitempty" protobuf:"bytes,5,name=clusterDecisionResource"` PullRequest *PullRequestGenerator `json:"pullRequest,omitempty" protobuf:"bytes,6,name=pullRequest"` + Plugin *PluginGenerator `json:"plugin,omitempty" protobuf:"bytes,7,name=pullRequest"` + + // Selector allows to post-filter all generator. + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,8,name=selector"` } type ApplicationSetTerminalGenerators []ApplicationSetTerminalGenerator @@ -170,6 +215,8 @@ func (g ApplicationSetTerminalGenerators) toApplicationSetNestedGenerators() []A SCMProvider: terminalGenerator.SCMProvider, ClusterDecisionResource: terminalGenerator.ClusterDecisionResource, PullRequest: terminalGenerator.PullRequest, + Plugin: terminalGenerator.Plugin, + Selector: terminalGenerator.Selector, } } return nestedGenerators @@ -177,8 +224,9 @@ func (g ApplicationSetTerminalGenerators) toApplicationSetNestedGenerators() []A // ListGenerator include items info type ListGenerator struct { - Elements []apiextensionsv1.JSON `json:"elements" protobuf:"bytes,1,name=elements"` - Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,2,name=template"` + Elements []apiextensionsv1.JSON `json:"elements" protobuf:"bytes,1,name=elements"` + Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,2,name=template"` + ElementsYaml string `json:"elementsYaml,omitempty" protobuf:"bytes,3,opt,name=elementsYaml"` } // MatrixGenerator generates the cartesian product of two sets of parameters. 
The parameters are defined by two nested @@ -314,6 +362,9 @@ type GitGenerator struct { RequeueAfterSeconds *int64 `json:"requeueAfterSeconds,omitempty" protobuf:"bytes,5,name=requeueAfterSeconds"` Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,6,name=template"` PathParamPrefix string `json:"pathParamPrefix,omitempty" protobuf:"bytes,7,name=pathParamPrefix"` + + // Values contains key/value pairs which are passed directly as parameters to the template + Values map[string]string `json:"values,omitempty" protobuf:"bytes,8,name=values"` } type GitDirectoryGeneratorItem struct { @@ -342,6 +393,10 @@ type SCMProviderGenerator struct { // Standard parameters. RequeueAfterSeconds *int64 `json:"requeueAfterSeconds,omitempty" protobuf:"varint,9,opt,name=requeueAfterSeconds"` Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,10,opt,name=template"` + + // Values contains key/value pairs which are passed directly as parameters to the template + Values map[string]string `json:"values,omitempty" protobuf:"bytes,11,name=values"` + AWSCodeCommit *SCMProviderGeneratorAWSCodeCommit `json:"awsCodeCommit,omitempty" protobuf:"bytes,12,opt,name=awsCodeCommit"` } // SCMProviderGeneratorGitea defines a connection info specific to Gitea. @@ -384,6 +439,8 @@ type SCMProviderGeneratorGitlab struct { TokenRef *SecretRef `json:"tokenRef,omitempty" protobuf:"bytes,4,opt,name=tokenRef"` // Scan all branches instead of just the default branch. AllBranches bool `json:"allBranches,omitempty" protobuf:"varint,5,opt,name=allBranches"` + // Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false + Insecure bool `json:"insecure,omitempty" protobuf:"varint,6,opt,name=insecure"` } // SCMProviderGeneratorBitbucket defines connection info specific to Bitbucket Cloud (API version 2). @@ -424,6 +481,25 @@ type SCMProviderGeneratorAzureDevOps struct { AllBranches bool `json:"allBranches,omitempty" protobuf:"varint,9,opt,name=allBranches"` } +type TagFilter struct { + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + +// SCMProviderGeneratorAWSCodeCommit defines connection info specific to AWS CodeCommit. +type SCMProviderGeneratorAWSCodeCommit struct { + // TagFilters provides the tag filter(s) for repo discovery + TagFilters []*TagFilter `json:"tagFilters,omitempty" protobuf:"bytes,1,opt,name=tagFilters"` + // Role provides the AWS IAM role to assume, for cross-account repo discovery + // if not provided, AppSet controller will use its pod/node identity to discover. + Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` + // Region provides the AWS region to discover repos. + // if not provided, AppSet controller will infer the current region from environment. + Region string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"` + // Scan all branches instead of just the default branch. + AllBranches bool `json:"allBranches,omitempty" protobuf:"varint,4,opt,name=allBranches"` +} + // SCMProviderGeneratorFilter is a single repository filter. // If multiple filter types are set on a single struct, they will be AND'd together. All filters must // pass for a repo to be included. @@ -450,11 +526,14 @@ type PullRequestGenerator struct { // Filters for which pull requests should be considered. Filters []PullRequestGeneratorFilter `json:"filters,omitempty" protobuf:"bytes,5,rep,name=filters"` // Standard parameters. 
- RequeueAfterSeconds *int64 `json:"requeueAfterSeconds,omitempty" protobuf:"varint,6,opt,name=requeueAfterSeconds"` - Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,7,opt,name=template"` + RequeueAfterSeconds *int64 `json:"requeueAfterSeconds,omitempty" protobuf:"varint,6,opt,name=requeueAfterSeconds"` + Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,7,opt,name=template"` + Bitbucket *PullRequestGeneratorBitbucket `json:"bitbucket,omitempty" protobuf:"bytes,8,opt,name=bitbucket"` + // Additional provider to use and config for it. + AzureDevOps *PullRequestGeneratorAzureDevOps `json:"azuredevops,omitempty" protobuf:"bytes,9,opt,name=azuredevops"` } -// PullRequestGenerator defines connection info specific to Gitea. +// PullRequestGeneratorGitea defines connection info specific to Gitea. type PullRequestGeneratorGitea struct { // Gitea org or user to scan. Required. Owner string `json:"owner" protobuf:"bytes,1,opt,name=owner"` @@ -468,6 +547,22 @@ type PullRequestGeneratorGitea struct { Insecure bool `json:"insecure,omitempty" protobuf:"varint,5,opt,name=insecure"` } +// PullRequestGeneratorAzureDevOps defines connection info specific to AzureDevOps. +type PullRequestGeneratorAzureDevOps struct { + // Azure DevOps org to scan. Required. + Organization string `json:"organization" protobuf:"bytes,1,opt,name=organization"` + // Azure DevOps project name to scan. Required. + Project string `json:"project" protobuf:"bytes,2,opt,name=project"` + // Azure DevOps repo name to scan. Required. + Repo string `json:"repo" protobuf:"bytes,3,opt,name=repo"` + // The Azure DevOps API URL to talk to. If blank, use https://dev.azure.com/. + API string `json:"api,omitempty" protobuf:"bytes,4,opt,name=api"` + // Authentication token reference. + TokenRef *SecretRef `json:"tokenRef,omitempty" protobuf:"bytes,5,opt,name=tokenRef"` + // Labels is used to filter the PRs that you want to target + Labels []string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` +} + // PullRequestGenerator defines connection info specific to GitHub. type PullRequestGeneratorGithub struct { // GitHub org or user to scan. Required. @@ -496,9 +591,11 @@ type PullRequestGeneratorGitLab struct { Labels []string `json:"labels,omitempty" protobuf:"bytes,4,rep,name=labels"` // PullRequestState is an additional MRs filter to get only those with a certain state. Default: "" (all states) PullRequestState string `json:"pullRequestState,omitempty" protobuf:"bytes,5,rep,name=pullRequestState"` + // Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false + Insecure bool `json:"insecure,omitempty" protobuf:"varint,6,opt,name=insecure"` } -// PullRequestGenerator defines connection info specific to BitbucketServer. +// PullRequestGeneratorBitbucketServer defines connection info specific to BitbucketServer. type PullRequestGeneratorBitbucketServer struct { // Project to scan. Required. Project string `json:"project" protobuf:"bytes,1,opt,name=project"` @@ -510,6 +607,26 @@ type PullRequestGeneratorBitbucketServer struct { BasicAuth *BasicAuthBitbucketServer `json:"basicAuth,omitempty" protobuf:"bytes,4,opt,name=basicAuth"` } +// PullRequestGeneratorBitbucket defines connection info specific to Bitbucket. +type PullRequestGeneratorBitbucket struct { + // Workspace to scan. Required. + Owner string `json:"owner" protobuf:"bytes,1,opt,name=owner"` + // Repo name to scan. Required. 
+ Repo string `json:"repo" protobuf:"bytes,2,opt,name=repo"` + // The Bitbucket REST API URL to talk to. If blank, uses https://api.bitbucket.org/2.0. + API string `json:"api,omitempty" protobuf:"bytes,3,opt,name=api"` + // Credentials for Basic auth + BasicAuth *BasicAuthBitbucketServer `json:"basicAuth,omitempty" protobuf:"bytes,4,opt,name=basicAuth"` + // Credentials for AppToken (Bearer auth) + BearerToken *BearerTokenBitbucketCloud `json:"bearerToken,omitempty" protobuf:"bytes,5,opt,name=bearerToken"` +} + +// BearerTokenBitbucketCloud defines the Bearer token for BitBucket AppToken auth. +type BearerTokenBitbucketCloud struct { + // Password (or personal access token) reference. + TokenRef *SecretRef `json:"tokenRef" protobuf:"bytes,1,opt,name=tokenRef"` +} + // BasicAuthBitbucketServer defines the username/(password or personal access token) for Basic auth. type BasicAuthBitbucketServer struct { // Username for Basic auth @@ -522,7 +639,34 @@ type BasicAuthBitbucketServer struct { // If multiple filter types are set on a single struct, they will be AND'd together. All filters must // pass for a pull request to be included. type PullRequestGeneratorFilter struct { - BranchMatch *string `json:"branchMatch,omitempty" protobuf:"bytes,1,opt,name=branchMatch"` + BranchMatch *string `json:"branchMatch,omitempty" protobuf:"bytes,1,opt,name=branchMatch"` + TargetBranchMatch *string `json:"targetBranchMatch,omitempty" protobuf:"bytes,2,opt,name=targetBranchMatch"` +} + +type PluginConfigMapRef struct { + // Name of the ConfigMap + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +type PluginParameters map[string]apiextensionsv1.JSON + +type PluginInput struct { + // Parameters contains the information to pass to the plugin. It is a map. The keys must be strings, and the + // values can be any type. + Parameters PluginParameters `json:"parameters,omitempty" protobuf:"bytes,1,name=parameters"` +} + +// PluginGenerator defines connection info specific to Plugin. +type PluginGenerator struct { + ConfigMapRef PluginConfigMapRef `json:"configMapRef" protobuf:"bytes,1,name=configMapRef"` + Input PluginInput `json:"input,omitempty" protobuf:"bytes,2,name=input"` + // RequeueAfterSeconds determines how long the ApplicationSet controller will wait before reconciling the ApplicationSet again. + RequeueAfterSeconds *int64 `json:"requeueAfterSeconds,omitempty" protobuf:"varint,3,opt,name=requeueAfterSeconds"` + Template ApplicationSetTemplate `json:"template,omitempty" protobuf:"bytes,4,name=template"` + + // Values contains key/value pairs which are passed directly as parameters to the template. These values will not be + // sent as parameters to the plugin. + Values map[string]string `json:"values,omitempty" protobuf:"bytes,5,name=values"` } // ApplicationSetStatus defines the observed state of ApplicationSet @@ -675,3 +819,14 @@ func (status *ApplicationSetStatus) SetApplicationStatus(newStatus ApplicationSe } status.ApplicationStatus = append(status.ApplicationStatus, newStatus) } + +// QualifiedName returns the full qualified name of the applicationset, including +// the name of the namespace it is created in delimited by a forward slash, +// i.e. 
/ +func (a *ApplicationSet) QualifiedName() string { + if a.Namespace == "" { + return a.Name + } else { + return a.Namespace + "/" + a.Name + } +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go index 4410d283aa..8a788c206b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go @@ -17,6 +17,7 @@ import ( v12 "k8s.io/api/core/v1" v11 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" math "math" math_bits "math/bits" @@ -318,10 +319,38 @@ func (m *ApplicationMatchExpression) XXX_DiscardUnknown() { var xxx_messageInfo_ApplicationMatchExpression proto.InternalMessageInfo +func (m *ApplicationPreservedFields) Reset() { *m = ApplicationPreservedFields{} } +func (*ApplicationPreservedFields) ProtoMessage() {} +func (*ApplicationPreservedFields) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{10} +} +func (m *ApplicationPreservedFields) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ApplicationPreservedFields) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ApplicationPreservedFields) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplicationPreservedFields.Merge(m, src) +} +func (m *ApplicationPreservedFields) XXX_Size() int { + return m.Size() +} +func (m *ApplicationPreservedFields) XXX_DiscardUnknown() { + xxx_messageInfo_ApplicationPreservedFields.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplicationPreservedFields proto.InternalMessageInfo + func (m *ApplicationSet) Reset() { *m = ApplicationSet{} } func (*ApplicationSet) ProtoMessage() {} func (*ApplicationSet) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{10} + return fileDescriptor_030104ce3b95bcac, []int{11} } func (m *ApplicationSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -349,7 +378,7 @@ var xxx_messageInfo_ApplicationSet proto.InternalMessageInfo func (m *ApplicationSetApplicationStatus) Reset() { *m = ApplicationSetApplicationStatus{} } func (*ApplicationSetApplicationStatus) ProtoMessage() {} func (*ApplicationSetApplicationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{11} + return fileDescriptor_030104ce3b95bcac, []int{12} } func (m *ApplicationSetApplicationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -377,7 +406,7 @@ var xxx_messageInfo_ApplicationSetApplicationStatus proto.InternalMessageInfo func (m *ApplicationSetCondition) Reset() { *m = ApplicationSetCondition{} } func (*ApplicationSetCondition) ProtoMessage() {} func (*ApplicationSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{12} + return fileDescriptor_030104ce3b95bcac, []int{13} } func (m *ApplicationSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -405,7 +434,7 @@ var xxx_messageInfo_ApplicationSetCondition proto.InternalMessageInfo func (m *ApplicationSetGenerator) Reset() { *m = ApplicationSetGenerator{} } func (*ApplicationSetGenerator) ProtoMessage() {} func (*ApplicationSetGenerator) Descriptor() ([]byte, []int) { - return 
fileDescriptor_030104ce3b95bcac, []int{13} + return fileDescriptor_030104ce3b95bcac, []int{14} } func (m *ApplicationSetGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -433,7 +462,7 @@ var xxx_messageInfo_ApplicationSetGenerator proto.InternalMessageInfo func (m *ApplicationSetList) Reset() { *m = ApplicationSetList{} } func (*ApplicationSetList) ProtoMessage() {} func (*ApplicationSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{14} + return fileDescriptor_030104ce3b95bcac, []int{15} } func (m *ApplicationSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -461,7 +490,7 @@ var xxx_messageInfo_ApplicationSetList proto.InternalMessageInfo func (m *ApplicationSetNestedGenerator) Reset() { *m = ApplicationSetNestedGenerator{} } func (*ApplicationSetNestedGenerator) ProtoMessage() {} func (*ApplicationSetNestedGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{15} + return fileDescriptor_030104ce3b95bcac, []int{16} } func (m *ApplicationSetNestedGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -489,7 +518,7 @@ var xxx_messageInfo_ApplicationSetNestedGenerator proto.InternalMessageInfo func (m *ApplicationSetRolloutStep) Reset() { *m = ApplicationSetRolloutStep{} } func (*ApplicationSetRolloutStep) ProtoMessage() {} func (*ApplicationSetRolloutStep) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{16} + return fileDescriptor_030104ce3b95bcac, []int{17} } func (m *ApplicationSetRolloutStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -517,7 +546,7 @@ var xxx_messageInfo_ApplicationSetRolloutStep proto.InternalMessageInfo func (m *ApplicationSetRolloutStrategy) Reset() { *m = ApplicationSetRolloutStrategy{} } func (*ApplicationSetRolloutStrategy) ProtoMessage() {} func (*ApplicationSetRolloutStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{17} + return fileDescriptor_030104ce3b95bcac, []int{18} } func (m *ApplicationSetRolloutStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -545,7 +574,7 @@ var xxx_messageInfo_ApplicationSetRolloutStrategy proto.InternalMessageInfo func (m *ApplicationSetSpec) Reset() { *m = ApplicationSetSpec{} } func (*ApplicationSetSpec) ProtoMessage() {} func (*ApplicationSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{18} + return fileDescriptor_030104ce3b95bcac, []int{19} } func (m *ApplicationSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -573,7 +602,7 @@ var xxx_messageInfo_ApplicationSetSpec proto.InternalMessageInfo func (m *ApplicationSetStatus) Reset() { *m = ApplicationSetStatus{} } func (*ApplicationSetStatus) ProtoMessage() {} func (*ApplicationSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{19} + return fileDescriptor_030104ce3b95bcac, []int{20} } func (m *ApplicationSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -601,7 +630,7 @@ var xxx_messageInfo_ApplicationSetStatus proto.InternalMessageInfo func (m *ApplicationSetStrategy) Reset() { *m = ApplicationSetStrategy{} } func (*ApplicationSetStrategy) ProtoMessage() {} func (*ApplicationSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{20} + return fileDescriptor_030104ce3b95bcac, []int{21} } func (m *ApplicationSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -629,7 +658,7 @@ var 
xxx_messageInfo_ApplicationSetStrategy proto.InternalMessageInfo func (m *ApplicationSetSyncPolicy) Reset() { *m = ApplicationSetSyncPolicy{} } func (*ApplicationSetSyncPolicy) ProtoMessage() {} func (*ApplicationSetSyncPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{21} + return fileDescriptor_030104ce3b95bcac, []int{22} } func (m *ApplicationSetSyncPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -657,7 +686,7 @@ var xxx_messageInfo_ApplicationSetSyncPolicy proto.InternalMessageInfo func (m *ApplicationSetTemplate) Reset() { *m = ApplicationSetTemplate{} } func (*ApplicationSetTemplate) ProtoMessage() {} func (*ApplicationSetTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{22} + return fileDescriptor_030104ce3b95bcac, []int{23} } func (m *ApplicationSetTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -685,7 +714,7 @@ var xxx_messageInfo_ApplicationSetTemplate proto.InternalMessageInfo func (m *ApplicationSetTemplateMeta) Reset() { *m = ApplicationSetTemplateMeta{} } func (*ApplicationSetTemplateMeta) ProtoMessage() {} func (*ApplicationSetTemplateMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{23} + return fileDescriptor_030104ce3b95bcac, []int{24} } func (m *ApplicationSetTemplateMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -713,7 +742,7 @@ var xxx_messageInfo_ApplicationSetTemplateMeta proto.InternalMessageInfo func (m *ApplicationSetTerminalGenerator) Reset() { *m = ApplicationSetTerminalGenerator{} } func (*ApplicationSetTerminalGenerator) ProtoMessage() {} func (*ApplicationSetTerminalGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{24} + return fileDescriptor_030104ce3b95bcac, []int{25} } func (m *ApplicationSetTerminalGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -741,7 +770,7 @@ var xxx_messageInfo_ApplicationSetTerminalGenerator proto.InternalMessageInfo func (m *ApplicationSource) Reset() { *m = ApplicationSource{} } func (*ApplicationSource) ProtoMessage() {} func (*ApplicationSource) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{25} + return fileDescriptor_030104ce3b95bcac, []int{26} } func (m *ApplicationSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -769,7 +798,7 @@ var xxx_messageInfo_ApplicationSource proto.InternalMessageInfo func (m *ApplicationSourceDirectory) Reset() { *m = ApplicationSourceDirectory{} } func (*ApplicationSourceDirectory) ProtoMessage() {} func (*ApplicationSourceDirectory) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{26} + return fileDescriptor_030104ce3b95bcac, []int{27} } func (m *ApplicationSourceDirectory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -797,7 +826,7 @@ var xxx_messageInfo_ApplicationSourceDirectory proto.InternalMessageInfo func (m *ApplicationSourceHelm) Reset() { *m = ApplicationSourceHelm{} } func (*ApplicationSourceHelm) ProtoMessage() {} func (*ApplicationSourceHelm) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{27} + return fileDescriptor_030104ce3b95bcac, []int{28} } func (m *ApplicationSourceHelm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -825,7 +854,7 @@ var xxx_messageInfo_ApplicationSourceHelm proto.InternalMessageInfo func (m *ApplicationSourceJsonnet) Reset() { *m = ApplicationSourceJsonnet{} } func (*ApplicationSourceJsonnet) ProtoMessage() {} func 
(*ApplicationSourceJsonnet) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{28} + return fileDescriptor_030104ce3b95bcac, []int{29} } func (m *ApplicationSourceJsonnet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -853,7 +882,7 @@ var xxx_messageInfo_ApplicationSourceJsonnet proto.InternalMessageInfo func (m *ApplicationSourceKustomize) Reset() { *m = ApplicationSourceKustomize{} } func (*ApplicationSourceKustomize) ProtoMessage() {} func (*ApplicationSourceKustomize) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{29} + return fileDescriptor_030104ce3b95bcac, []int{30} } func (m *ApplicationSourceKustomize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -881,7 +910,7 @@ var xxx_messageInfo_ApplicationSourceKustomize proto.InternalMessageInfo func (m *ApplicationSourcePlugin) Reset() { *m = ApplicationSourcePlugin{} } func (*ApplicationSourcePlugin) ProtoMessage() {} func (*ApplicationSourcePlugin) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{30} + return fileDescriptor_030104ce3b95bcac, []int{31} } func (m *ApplicationSourcePlugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -909,7 +938,7 @@ var xxx_messageInfo_ApplicationSourcePlugin proto.InternalMessageInfo func (m *ApplicationSourcePluginParameter) Reset() { *m = ApplicationSourcePluginParameter{} } func (*ApplicationSourcePluginParameter) ProtoMessage() {} func (*ApplicationSourcePluginParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{31} + return fileDescriptor_030104ce3b95bcac, []int{32} } func (m *ApplicationSourcePluginParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -937,7 +966,7 @@ var xxx_messageInfo_ApplicationSourcePluginParameter proto.InternalMessageInfo func (m *ApplicationSpec) Reset() { *m = ApplicationSpec{} } func (*ApplicationSpec) ProtoMessage() {} func (*ApplicationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{32} + return fileDescriptor_030104ce3b95bcac, []int{33} } func (m *ApplicationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -965,7 +994,7 @@ var xxx_messageInfo_ApplicationSpec proto.InternalMessageInfo func (m *ApplicationStatus) Reset() { *m = ApplicationStatus{} } func (*ApplicationStatus) ProtoMessage() {} func (*ApplicationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{33} + return fileDescriptor_030104ce3b95bcac, []int{34} } func (m *ApplicationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -993,7 +1022,7 @@ var xxx_messageInfo_ApplicationStatus proto.InternalMessageInfo func (m *ApplicationSummary) Reset() { *m = ApplicationSummary{} } func (*ApplicationSummary) ProtoMessage() {} func (*ApplicationSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{34} + return fileDescriptor_030104ce3b95bcac, []int{35} } func (m *ApplicationSummary) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1021,7 +1050,7 @@ var xxx_messageInfo_ApplicationSummary proto.InternalMessageInfo func (m *ApplicationTree) Reset() { *m = ApplicationTree{} } func (*ApplicationTree) ProtoMessage() {} func (*ApplicationTree) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{35} + return fileDescriptor_030104ce3b95bcac, []int{36} } func (m *ApplicationTree) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1049,7 +1078,7 @@ var xxx_messageInfo_ApplicationTree 
proto.InternalMessageInfo func (m *ApplicationWatchEvent) Reset() { *m = ApplicationWatchEvent{} } func (*ApplicationWatchEvent) ProtoMessage() {} func (*ApplicationWatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{36} + return fileDescriptor_030104ce3b95bcac, []int{37} } func (m *ApplicationWatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1077,7 +1106,7 @@ var xxx_messageInfo_ApplicationWatchEvent proto.InternalMessageInfo func (m *Backoff) Reset() { *m = Backoff{} } func (*Backoff) ProtoMessage() {} func (*Backoff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{37} + return fileDescriptor_030104ce3b95bcac, []int{38} } func (m *Backoff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1105,7 +1134,7 @@ var xxx_messageInfo_Backoff proto.InternalMessageInfo func (m *BasicAuthBitbucketServer) Reset() { *m = BasicAuthBitbucketServer{} } func (*BasicAuthBitbucketServer) ProtoMessage() {} func (*BasicAuthBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{38} + return fileDescriptor_030104ce3b95bcac, []int{39} } func (m *BasicAuthBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1130,10 +1159,66 @@ func (m *BasicAuthBitbucketServer) XXX_DiscardUnknown() { var xxx_messageInfo_BasicAuthBitbucketServer proto.InternalMessageInfo +func (m *BearerTokenBitbucketCloud) Reset() { *m = BearerTokenBitbucketCloud{} } +func (*BearerTokenBitbucketCloud) ProtoMessage() {} +func (*BearerTokenBitbucketCloud) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{40} +} +func (m *BearerTokenBitbucketCloud) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BearerTokenBitbucketCloud) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BearerTokenBitbucketCloud) XXX_Merge(src proto.Message) { + xxx_messageInfo_BearerTokenBitbucketCloud.Merge(m, src) +} +func (m *BearerTokenBitbucketCloud) XXX_Size() int { + return m.Size() +} +func (m *BearerTokenBitbucketCloud) XXX_DiscardUnknown() { + xxx_messageInfo_BearerTokenBitbucketCloud.DiscardUnknown(m) +} + +var xxx_messageInfo_BearerTokenBitbucketCloud proto.InternalMessageInfo + +func (m *ChartDetails) Reset() { *m = ChartDetails{} } +func (*ChartDetails) ProtoMessage() {} +func (*ChartDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{41} +} +func (m *ChartDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChartDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ChartDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChartDetails.Merge(m, src) +} +func (m *ChartDetails) XXX_Size() int { + return m.Size() +} +func (m *ChartDetails) XXX_DiscardUnknown() { + xxx_messageInfo_ChartDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_ChartDetails proto.InternalMessageInfo + func (m *Cluster) Reset() { *m = Cluster{} } func (*Cluster) ProtoMessage() {} func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{39} + return fileDescriptor_030104ce3b95bcac, []int{42} } func (m *Cluster) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1161,7 +1246,7 @@ var 
xxx_messageInfo_Cluster proto.InternalMessageInfo func (m *ClusterCacheInfo) Reset() { *m = ClusterCacheInfo{} } func (*ClusterCacheInfo) ProtoMessage() {} func (*ClusterCacheInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{40} + return fileDescriptor_030104ce3b95bcac, []int{43} } func (m *ClusterCacheInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1189,7 +1274,7 @@ var xxx_messageInfo_ClusterCacheInfo proto.InternalMessageInfo func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } func (*ClusterConfig) ProtoMessage() {} func (*ClusterConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{41} + return fileDescriptor_030104ce3b95bcac, []int{44} } func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1217,7 +1302,7 @@ var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo func (m *ClusterGenerator) Reset() { *m = ClusterGenerator{} } func (*ClusterGenerator) ProtoMessage() {} func (*ClusterGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{42} + return fileDescriptor_030104ce3b95bcac, []int{45} } func (m *ClusterGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1245,7 +1330,7 @@ var xxx_messageInfo_ClusterGenerator proto.InternalMessageInfo func (m *ClusterInfo) Reset() { *m = ClusterInfo{} } func (*ClusterInfo) ProtoMessage() {} func (*ClusterInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{43} + return fileDescriptor_030104ce3b95bcac, []int{46} } func (m *ClusterInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1273,7 +1358,7 @@ var xxx_messageInfo_ClusterInfo proto.InternalMessageInfo func (m *ClusterList) Reset() { *m = ClusterList{} } func (*ClusterList) ProtoMessage() {} func (*ClusterList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{44} + return fileDescriptor_030104ce3b95bcac, []int{47} } func (m *ClusterList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1301,7 +1386,7 @@ var xxx_messageInfo_ClusterList proto.InternalMessageInfo func (m *Command) Reset() { *m = Command{} } func (*Command) ProtoMessage() {} func (*Command) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{45} + return fileDescriptor_030104ce3b95bcac, []int{48} } func (m *Command) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1329,7 +1414,7 @@ var xxx_messageInfo_Command proto.InternalMessageInfo func (m *ComparedTo) Reset() { *m = ComparedTo{} } func (*ComparedTo) ProtoMessage() {} func (*ComparedTo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{46} + return fileDescriptor_030104ce3b95bcac, []int{49} } func (m *ComparedTo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1357,7 +1442,7 @@ var xxx_messageInfo_ComparedTo proto.InternalMessageInfo func (m *ComponentParameter) Reset() { *m = ComponentParameter{} } func (*ComponentParameter) ProtoMessage() {} func (*ComponentParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{47} + return fileDescriptor_030104ce3b95bcac, []int{50} } func (m *ComponentParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1385,7 +1470,7 @@ var xxx_messageInfo_ComponentParameter proto.InternalMessageInfo func (m *ConfigManagementPlugin) Reset() { *m = ConfigManagementPlugin{} } func (*ConfigManagementPlugin) ProtoMessage() {} func (*ConfigManagementPlugin) Descriptor() ([]byte, []int) { - 
return fileDescriptor_030104ce3b95bcac, []int{48} + return fileDescriptor_030104ce3b95bcac, []int{51} } func (m *ConfigManagementPlugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1413,7 +1498,7 @@ var xxx_messageInfo_ConfigManagementPlugin proto.InternalMessageInfo func (m *ConnectionState) Reset() { *m = ConnectionState{} } func (*ConnectionState) ProtoMessage() {} func (*ConnectionState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{49} + return fileDescriptor_030104ce3b95bcac, []int{52} } func (m *ConnectionState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1441,7 +1526,7 @@ var xxx_messageInfo_ConnectionState proto.InternalMessageInfo func (m *DuckTypeGenerator) Reset() { *m = DuckTypeGenerator{} } func (*DuckTypeGenerator) ProtoMessage() {} func (*DuckTypeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{50} + return fileDescriptor_030104ce3b95bcac, []int{53} } func (m *DuckTypeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1469,7 +1554,7 @@ var xxx_messageInfo_DuckTypeGenerator proto.InternalMessageInfo func (m *EnvEntry) Reset() { *m = EnvEntry{} } func (*EnvEntry) ProtoMessage() {} func (*EnvEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{51} + return fileDescriptor_030104ce3b95bcac, []int{54} } func (m *EnvEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1497,7 +1582,7 @@ var xxx_messageInfo_EnvEntry proto.InternalMessageInfo func (m *ExecProviderConfig) Reset() { *m = ExecProviderConfig{} } func (*ExecProviderConfig) ProtoMessage() {} func (*ExecProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{52} + return fileDescriptor_030104ce3b95bcac, []int{55} } func (m *ExecProviderConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1525,7 +1610,7 @@ var xxx_messageInfo_ExecProviderConfig proto.InternalMessageInfo func (m *GitDirectoryGeneratorItem) Reset() { *m = GitDirectoryGeneratorItem{} } func (*GitDirectoryGeneratorItem) ProtoMessage() {} func (*GitDirectoryGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{53} + return fileDescriptor_030104ce3b95bcac, []int{56} } func (m *GitDirectoryGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1553,7 +1638,7 @@ var xxx_messageInfo_GitDirectoryGeneratorItem proto.InternalMessageInfo func (m *GitFileGeneratorItem) Reset() { *m = GitFileGeneratorItem{} } func (*GitFileGeneratorItem) ProtoMessage() {} func (*GitFileGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{54} + return fileDescriptor_030104ce3b95bcac, []int{57} } func (m *GitFileGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1581,7 +1666,7 @@ var xxx_messageInfo_GitFileGeneratorItem proto.InternalMessageInfo func (m *GitGenerator) Reset() { *m = GitGenerator{} } func (*GitGenerator) ProtoMessage() {} func (*GitGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{55} + return fileDescriptor_030104ce3b95bcac, []int{58} } func (m *GitGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1609,7 +1694,7 @@ var xxx_messageInfo_GitGenerator proto.InternalMessageInfo func (m *GnuPGPublicKey) Reset() { *m = GnuPGPublicKey{} } func (*GnuPGPublicKey) ProtoMessage() {} func (*GnuPGPublicKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{56} + return 
fileDescriptor_030104ce3b95bcac, []int{59} } func (m *GnuPGPublicKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1637,7 +1722,7 @@ var xxx_messageInfo_GnuPGPublicKey proto.InternalMessageInfo func (m *GnuPGPublicKeyList) Reset() { *m = GnuPGPublicKeyList{} } func (*GnuPGPublicKeyList) ProtoMessage() {} func (*GnuPGPublicKeyList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{57} + return fileDescriptor_030104ce3b95bcac, []int{60} } func (m *GnuPGPublicKeyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1665,7 +1750,7 @@ var xxx_messageInfo_GnuPGPublicKeyList proto.InternalMessageInfo func (m *HealthStatus) Reset() { *m = HealthStatus{} } func (*HealthStatus) ProtoMessage() {} func (*HealthStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{58} + return fileDescriptor_030104ce3b95bcac, []int{61} } func (m *HealthStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1693,7 +1778,7 @@ var xxx_messageInfo_HealthStatus proto.InternalMessageInfo func (m *HelmFileParameter) Reset() { *m = HelmFileParameter{} } func (*HelmFileParameter) ProtoMessage() {} func (*HelmFileParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{59} + return fileDescriptor_030104ce3b95bcac, []int{62} } func (m *HelmFileParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1721,7 +1806,7 @@ var xxx_messageInfo_HelmFileParameter proto.InternalMessageInfo func (m *HelmOptions) Reset() { *m = HelmOptions{} } func (*HelmOptions) ProtoMessage() {} func (*HelmOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{60} + return fileDescriptor_030104ce3b95bcac, []int{63} } func (m *HelmOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1749,7 +1834,7 @@ var xxx_messageInfo_HelmOptions proto.InternalMessageInfo func (m *HelmParameter) Reset() { *m = HelmParameter{} } func (*HelmParameter) ProtoMessage() {} func (*HelmParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{61} + return fileDescriptor_030104ce3b95bcac, []int{64} } func (m *HelmParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1777,7 +1862,7 @@ var xxx_messageInfo_HelmParameter proto.InternalMessageInfo func (m *HostInfo) Reset() { *m = HostInfo{} } func (*HostInfo) ProtoMessage() {} func (*HostInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{62} + return fileDescriptor_030104ce3b95bcac, []int{65} } func (m *HostInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1805,7 +1890,7 @@ var xxx_messageInfo_HostInfo proto.InternalMessageInfo func (m *HostResourceInfo) Reset() { *m = HostResourceInfo{} } func (*HostResourceInfo) ProtoMessage() {} func (*HostResourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{63} + return fileDescriptor_030104ce3b95bcac, []int{66} } func (m *HostResourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1833,7 +1918,7 @@ var xxx_messageInfo_HostResourceInfo proto.InternalMessageInfo func (m *Info) Reset() { *m = Info{} } func (*Info) ProtoMessage() {} func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{64} + return fileDescriptor_030104ce3b95bcac, []int{67} } func (m *Info) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1861,7 +1946,7 @@ var xxx_messageInfo_Info proto.InternalMessageInfo func (m *InfoItem) Reset() { *m = InfoItem{} } 
func (*InfoItem) ProtoMessage() {} func (*InfoItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{65} + return fileDescriptor_030104ce3b95bcac, []int{68} } func (m *InfoItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1889,7 +1974,7 @@ var xxx_messageInfo_InfoItem proto.InternalMessageInfo func (m *JWTToken) Reset() { *m = JWTToken{} } func (*JWTToken) ProtoMessage() {} func (*JWTToken) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{66} + return fileDescriptor_030104ce3b95bcac, []int{69} } func (m *JWTToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1917,7 +2002,7 @@ var xxx_messageInfo_JWTToken proto.InternalMessageInfo func (m *JWTTokens) Reset() { *m = JWTTokens{} } func (*JWTTokens) ProtoMessage() {} func (*JWTTokens) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{67} + return fileDescriptor_030104ce3b95bcac, []int{70} } func (m *JWTTokens) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1945,7 +2030,7 @@ var xxx_messageInfo_JWTTokens proto.InternalMessageInfo func (m *JsonnetVar) Reset() { *m = JsonnetVar{} } func (*JsonnetVar) ProtoMessage() {} func (*JsonnetVar) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{68} + return fileDescriptor_030104ce3b95bcac, []int{71} } func (m *JsonnetVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1973,7 +2058,7 @@ var xxx_messageInfo_JsonnetVar proto.InternalMessageInfo func (m *KnownTypeField) Reset() { *m = KnownTypeField{} } func (*KnownTypeField) ProtoMessage() {} func (*KnownTypeField) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{69} + return fileDescriptor_030104ce3b95bcac, []int{72} } func (m *KnownTypeField) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2001,7 +2086,7 @@ var xxx_messageInfo_KnownTypeField proto.InternalMessageInfo func (m *KustomizeOptions) Reset() { *m = KustomizeOptions{} } func (*KustomizeOptions) ProtoMessage() {} func (*KustomizeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{70} + return fileDescriptor_030104ce3b95bcac, []int{73} } func (m *KustomizeOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2026,10 +2111,38 @@ func (m *KustomizeOptions) XXX_DiscardUnknown() { var xxx_messageInfo_KustomizeOptions proto.InternalMessageInfo +func (m *KustomizeReplica) Reset() { *m = KustomizeReplica{} } +func (*KustomizeReplica) ProtoMessage() {} +func (*KustomizeReplica) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{74} +} +func (m *KustomizeReplica) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KustomizeReplica) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KustomizeReplica) XXX_Merge(src proto.Message) { + xxx_messageInfo_KustomizeReplica.Merge(m, src) +} +func (m *KustomizeReplica) XXX_Size() int { + return m.Size() +} +func (m *KustomizeReplica) XXX_DiscardUnknown() { + xxx_messageInfo_KustomizeReplica.DiscardUnknown(m) +} + +var xxx_messageInfo_KustomizeReplica proto.InternalMessageInfo + func (m *ListGenerator) Reset() { *m = ListGenerator{} } func (*ListGenerator) ProtoMessage() {} func (*ListGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{71} + return fileDescriptor_030104ce3b95bcac, []int{75} } func (m 
*ListGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2057,7 +2170,7 @@ var xxx_messageInfo_ListGenerator proto.InternalMessageInfo func (m *ManagedNamespaceMetadata) Reset() { *m = ManagedNamespaceMetadata{} } func (*ManagedNamespaceMetadata) ProtoMessage() {} func (*ManagedNamespaceMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{72} + return fileDescriptor_030104ce3b95bcac, []int{76} } func (m *ManagedNamespaceMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2085,7 +2198,7 @@ var xxx_messageInfo_ManagedNamespaceMetadata proto.InternalMessageInfo func (m *MatrixGenerator) Reset() { *m = MatrixGenerator{} } func (*MatrixGenerator) ProtoMessage() {} func (*MatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{73} + return fileDescriptor_030104ce3b95bcac, []int{77} } func (m *MatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2113,7 +2226,7 @@ var xxx_messageInfo_MatrixGenerator proto.InternalMessageInfo func (m *MergeGenerator) Reset() { *m = MergeGenerator{} } func (*MergeGenerator) ProtoMessage() {} func (*MergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{74} + return fileDescriptor_030104ce3b95bcac, []int{78} } func (m *MergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2141,7 +2254,7 @@ var xxx_messageInfo_MergeGenerator proto.InternalMessageInfo func (m *NestedMatrixGenerator) Reset() { *m = NestedMatrixGenerator{} } func (*NestedMatrixGenerator) ProtoMessage() {} func (*NestedMatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{75} + return fileDescriptor_030104ce3b95bcac, []int{79} } func (m *NestedMatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2169,7 +2282,7 @@ var xxx_messageInfo_NestedMatrixGenerator proto.InternalMessageInfo func (m *NestedMergeGenerator) Reset() { *m = NestedMergeGenerator{} } func (*NestedMergeGenerator) ProtoMessage() {} func (*NestedMergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{76} + return fileDescriptor_030104ce3b95bcac, []int{80} } func (m *NestedMergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2197,7 +2310,7 @@ var xxx_messageInfo_NestedMergeGenerator proto.InternalMessageInfo func (m *Operation) Reset() { *m = Operation{} } func (*Operation) ProtoMessage() {} func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{77} + return fileDescriptor_030104ce3b95bcac, []int{81} } func (m *Operation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2225,7 +2338,7 @@ var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *OperationInitiator) Reset() { *m = OperationInitiator{} } func (*OperationInitiator) ProtoMessage() {} func (*OperationInitiator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{78} + return fileDescriptor_030104ce3b95bcac, []int{82} } func (m *OperationInitiator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2253,7 +2366,7 @@ var xxx_messageInfo_OperationInitiator proto.InternalMessageInfo func (m *OperationState) Reset() { *m = OperationState{} } func (*OperationState) ProtoMessage() {} func (*OperationState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{79} + return fileDescriptor_030104ce3b95bcac, []int{83} } func (m *OperationState) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -2278,10 +2391,66 @@ func (m *OperationState) XXX_DiscardUnknown() { var xxx_messageInfo_OperationState proto.InternalMessageInfo +func (m *OptionalArray) Reset() { *m = OptionalArray{} } +func (*OptionalArray) ProtoMessage() {} +func (*OptionalArray) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{84} +} +func (m *OptionalArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalArray.Merge(m, src) +} +func (m *OptionalArray) XXX_Size() int { + return m.Size() +} +func (m *OptionalArray) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalArray.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalArray proto.InternalMessageInfo + +func (m *OptionalMap) Reset() { *m = OptionalMap{} } +func (*OptionalMap) ProtoMessage() {} +func (*OptionalMap) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{85} +} +func (m *OptionalMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalMap.Merge(m, src) +} +func (m *OptionalMap) XXX_Size() int { + return m.Size() +} +func (m *OptionalMap) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalMap.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalMap proto.InternalMessageInfo + func (m *OrphanedResourceKey) Reset() { *m = OrphanedResourceKey{} } func (*OrphanedResourceKey) ProtoMessage() {} func (*OrphanedResourceKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{80} + return fileDescriptor_030104ce3b95bcac, []int{86} } func (m *OrphanedResourceKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2309,7 +2478,7 @@ var xxx_messageInfo_OrphanedResourceKey proto.InternalMessageInfo func (m *OrphanedResourcesMonitorSettings) Reset() { *m = OrphanedResourcesMonitorSettings{} } func (*OrphanedResourcesMonitorSettings) ProtoMessage() {} func (*OrphanedResourcesMonitorSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{81} + return fileDescriptor_030104ce3b95bcac, []int{87} } func (m *OrphanedResourcesMonitorSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2337,7 +2506,7 @@ var xxx_messageInfo_OrphanedResourcesMonitorSettings proto.InternalMessageInfo func (m *OverrideIgnoreDiff) Reset() { *m = OverrideIgnoreDiff{} } func (*OverrideIgnoreDiff) ProtoMessage() {} func (*OverrideIgnoreDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{82} + return fileDescriptor_030104ce3b95bcac, []int{88} } func (m *OverrideIgnoreDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2362,10 +2531,94 @@ func (m *OverrideIgnoreDiff) XXX_DiscardUnknown() { var xxx_messageInfo_OverrideIgnoreDiff proto.InternalMessageInfo +func (m *PluginConfigMapRef) Reset() { *m = PluginConfigMapRef{} } +func (*PluginConfigMapRef) ProtoMessage() {} +func (*PluginConfigMapRef) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{89} +} +func (m *PluginConfigMapRef) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *PluginConfigMapRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PluginConfigMapRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginConfigMapRef.Merge(m, src) +} +func (m *PluginConfigMapRef) XXX_Size() int { + return m.Size() +} +func (m *PluginConfigMapRef) XXX_DiscardUnknown() { + xxx_messageInfo_PluginConfigMapRef.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginConfigMapRef proto.InternalMessageInfo + +func (m *PluginGenerator) Reset() { *m = PluginGenerator{} } +func (*PluginGenerator) ProtoMessage() {} +func (*PluginGenerator) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{90} +} +func (m *PluginGenerator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginGenerator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PluginGenerator) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginGenerator.Merge(m, src) +} +func (m *PluginGenerator) XXX_Size() int { + return m.Size() +} +func (m *PluginGenerator) XXX_DiscardUnknown() { + xxx_messageInfo_PluginGenerator.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginGenerator proto.InternalMessageInfo + +func (m *PluginInput) Reset() { *m = PluginInput{} } +func (*PluginInput) ProtoMessage() {} +func (*PluginInput) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{91} +} +func (m *PluginInput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PluginInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PluginInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginInput.Merge(m, src) +} +func (m *PluginInput) XXX_Size() int { + return m.Size() +} +func (m *PluginInput) XXX_DiscardUnknown() { + xxx_messageInfo_PluginInput.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginInput proto.InternalMessageInfo + func (m *ProjectRole) Reset() { *m = ProjectRole{} } func (*ProjectRole) ProtoMessage() {} func (*ProjectRole) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{83} + return fileDescriptor_030104ce3b95bcac, []int{92} } func (m *ProjectRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2393,7 +2646,7 @@ var xxx_messageInfo_ProjectRole proto.InternalMessageInfo func (m *PullRequestGenerator) Reset() { *m = PullRequestGenerator{} } func (*PullRequestGenerator) ProtoMessage() {} func (*PullRequestGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{84} + return fileDescriptor_030104ce3b95bcac, []int{93} } func (m *PullRequestGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2418,10 +2671,66 @@ func (m *PullRequestGenerator) XXX_DiscardUnknown() { var xxx_messageInfo_PullRequestGenerator proto.InternalMessageInfo +func (m *PullRequestGeneratorAzureDevOps) Reset() { *m = PullRequestGeneratorAzureDevOps{} } +func (*PullRequestGeneratorAzureDevOps) ProtoMessage() {} +func (*PullRequestGeneratorAzureDevOps) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{94} +} +func (m *PullRequestGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *PullRequestGeneratorAzureDevOps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PullRequestGeneratorAzureDevOps) XXX_Merge(src proto.Message) { + xxx_messageInfo_PullRequestGeneratorAzureDevOps.Merge(m, src) +} +func (m *PullRequestGeneratorAzureDevOps) XXX_Size() int { + return m.Size() +} +func (m *PullRequestGeneratorAzureDevOps) XXX_DiscardUnknown() { + xxx_messageInfo_PullRequestGeneratorAzureDevOps.DiscardUnknown(m) +} + +var xxx_messageInfo_PullRequestGeneratorAzureDevOps proto.InternalMessageInfo + +func (m *PullRequestGeneratorBitbucket) Reset() { *m = PullRequestGeneratorBitbucket{} } +func (*PullRequestGeneratorBitbucket) ProtoMessage() {} +func (*PullRequestGeneratorBitbucket) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{95} +} +func (m *PullRequestGeneratorBitbucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PullRequestGeneratorBitbucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PullRequestGeneratorBitbucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_PullRequestGeneratorBitbucket.Merge(m, src) +} +func (m *PullRequestGeneratorBitbucket) XXX_Size() int { + return m.Size() +} +func (m *PullRequestGeneratorBitbucket) XXX_DiscardUnknown() { + xxx_messageInfo_PullRequestGeneratorBitbucket.DiscardUnknown(m) +} + +var xxx_messageInfo_PullRequestGeneratorBitbucket proto.InternalMessageInfo + func (m *PullRequestGeneratorBitbucketServer) Reset() { *m = PullRequestGeneratorBitbucketServer{} } func (*PullRequestGeneratorBitbucketServer) ProtoMessage() {} func (*PullRequestGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{85} + return fileDescriptor_030104ce3b95bcac, []int{96} } func (m *PullRequestGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2449,7 +2758,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucketServer proto.InternalMessageInf func (m *PullRequestGeneratorFilter) Reset() { *m = PullRequestGeneratorFilter{} } func (*PullRequestGeneratorFilter) ProtoMessage() {} func (*PullRequestGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{86} + return fileDescriptor_030104ce3b95bcac, []int{97} } func (m *PullRequestGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2477,7 +2786,7 @@ var xxx_messageInfo_PullRequestGeneratorFilter proto.InternalMessageInfo func (m *PullRequestGeneratorGitLab) Reset() { *m = PullRequestGeneratorGitLab{} } func (*PullRequestGeneratorGitLab) ProtoMessage() {} func (*PullRequestGeneratorGitLab) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{87} + return fileDescriptor_030104ce3b95bcac, []int{98} } func (m *PullRequestGeneratorGitLab) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2505,7 +2814,7 @@ var xxx_messageInfo_PullRequestGeneratorGitLab proto.InternalMessageInfo func (m *PullRequestGeneratorGitea) Reset() { *m = PullRequestGeneratorGitea{} } func (*PullRequestGeneratorGitea) ProtoMessage() {} func (*PullRequestGeneratorGitea) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{88} + return fileDescriptor_030104ce3b95bcac, []int{99} } func (m 
*PullRequestGeneratorGitea) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2533,7 +2842,7 @@ var xxx_messageInfo_PullRequestGeneratorGitea proto.InternalMessageInfo func (m *PullRequestGeneratorGithub) Reset() { *m = PullRequestGeneratorGithub{} } func (*PullRequestGeneratorGithub) ProtoMessage() {} func (*PullRequestGeneratorGithub) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{89} + return fileDescriptor_030104ce3b95bcac, []int{100} } func (m *PullRequestGeneratorGithub) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2561,7 +2870,7 @@ var xxx_messageInfo_PullRequestGeneratorGithub proto.InternalMessageInfo func (m *RefTarget) Reset() { *m = RefTarget{} } func (*RefTarget) ProtoMessage() {} func (*RefTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{90} + return fileDescriptor_030104ce3b95bcac, []int{101} } func (m *RefTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2589,7 +2898,7 @@ var xxx_messageInfo_RefTarget proto.InternalMessageInfo func (m *RepoCreds) Reset() { *m = RepoCreds{} } func (*RepoCreds) ProtoMessage() {} func (*RepoCreds) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{91} + return fileDescriptor_030104ce3b95bcac, []int{102} } func (m *RepoCreds) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2617,7 +2926,7 @@ var xxx_messageInfo_RepoCreds proto.InternalMessageInfo func (m *RepoCredsList) Reset() { *m = RepoCredsList{} } func (*RepoCredsList) ProtoMessage() {} func (*RepoCredsList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{92} + return fileDescriptor_030104ce3b95bcac, []int{103} } func (m *RepoCredsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2645,7 +2954,7 @@ var xxx_messageInfo_RepoCredsList proto.InternalMessageInfo func (m *Repository) Reset() { *m = Repository{} } func (*Repository) ProtoMessage() {} func (*Repository) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{93} + return fileDescriptor_030104ce3b95bcac, []int{104} } func (m *Repository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2673,7 +2982,7 @@ var xxx_messageInfo_Repository proto.InternalMessageInfo func (m *RepositoryCertificate) Reset() { *m = RepositoryCertificate{} } func (*RepositoryCertificate) ProtoMessage() {} func (*RepositoryCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{94} + return fileDescriptor_030104ce3b95bcac, []int{105} } func (m *RepositoryCertificate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2701,7 +3010,7 @@ var xxx_messageInfo_RepositoryCertificate proto.InternalMessageInfo func (m *RepositoryCertificateList) Reset() { *m = RepositoryCertificateList{} } func (*RepositoryCertificateList) ProtoMessage() {} func (*RepositoryCertificateList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{95} + return fileDescriptor_030104ce3b95bcac, []int{106} } func (m *RepositoryCertificateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2729,7 +3038,7 @@ var xxx_messageInfo_RepositoryCertificateList proto.InternalMessageInfo func (m *RepositoryList) Reset() { *m = RepositoryList{} } func (*RepositoryList) ProtoMessage() {} func (*RepositoryList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{96} + return fileDescriptor_030104ce3b95bcac, []int{107} } func (m *RepositoryList) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2757,7 +3066,7 @@ var xxx_messageInfo_RepositoryList proto.InternalMessageInfo func (m *ResourceAction) Reset() { *m = ResourceAction{} } func (*ResourceAction) ProtoMessage() {} func (*ResourceAction) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{97} + return fileDescriptor_030104ce3b95bcac, []int{108} } func (m *ResourceAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2785,7 +3094,7 @@ var xxx_messageInfo_ResourceAction proto.InternalMessageInfo func (m *ResourceActionDefinition) Reset() { *m = ResourceActionDefinition{} } func (*ResourceActionDefinition) ProtoMessage() {} func (*ResourceActionDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{98} + return fileDescriptor_030104ce3b95bcac, []int{109} } func (m *ResourceActionDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2813,7 +3122,7 @@ var xxx_messageInfo_ResourceActionDefinition proto.InternalMessageInfo func (m *ResourceActionParam) Reset() { *m = ResourceActionParam{} } func (*ResourceActionParam) ProtoMessage() {} func (*ResourceActionParam) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{99} + return fileDescriptor_030104ce3b95bcac, []int{110} } func (m *ResourceActionParam) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2841,7 +3150,7 @@ var xxx_messageInfo_ResourceActionParam proto.InternalMessageInfo func (m *ResourceActions) Reset() { *m = ResourceActions{} } func (*ResourceActions) ProtoMessage() {} func (*ResourceActions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{100} + return fileDescriptor_030104ce3b95bcac, []int{111} } func (m *ResourceActions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2869,7 +3178,7 @@ var xxx_messageInfo_ResourceActions proto.InternalMessageInfo func (m *ResourceDiff) Reset() { *m = ResourceDiff{} } func (*ResourceDiff) ProtoMessage() {} func (*ResourceDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{101} + return fileDescriptor_030104ce3b95bcac, []int{112} } func (m *ResourceDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2897,7 +3206,7 @@ var xxx_messageInfo_ResourceDiff proto.InternalMessageInfo func (m *ResourceIgnoreDifferences) Reset() { *m = ResourceIgnoreDifferences{} } func (*ResourceIgnoreDifferences) ProtoMessage() {} func (*ResourceIgnoreDifferences) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{102} + return fileDescriptor_030104ce3b95bcac, []int{113} } func (m *ResourceIgnoreDifferences) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2925,7 +3234,7 @@ var xxx_messageInfo_ResourceIgnoreDifferences proto.InternalMessageInfo func (m *ResourceNetworkingInfo) Reset() { *m = ResourceNetworkingInfo{} } func (*ResourceNetworkingInfo) ProtoMessage() {} func (*ResourceNetworkingInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{103} + return fileDescriptor_030104ce3b95bcac, []int{114} } func (m *ResourceNetworkingInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2953,7 +3262,7 @@ var xxx_messageInfo_ResourceNetworkingInfo proto.InternalMessageInfo func (m *ResourceNode) Reset() { *m = ResourceNode{} } func (*ResourceNode) ProtoMessage() {} func (*ResourceNode) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{104} + return fileDescriptor_030104ce3b95bcac, []int{115} } func (m *ResourceNode) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -2981,7 +3290,7 @@ var xxx_messageInfo_ResourceNode proto.InternalMessageInfo func (m *ResourceOverride) Reset() { *m = ResourceOverride{} } func (*ResourceOverride) ProtoMessage() {} func (*ResourceOverride) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{105} + return fileDescriptor_030104ce3b95bcac, []int{116} } func (m *ResourceOverride) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3009,7 +3318,7 @@ var xxx_messageInfo_ResourceOverride proto.InternalMessageInfo func (m *ResourceRef) Reset() { *m = ResourceRef{} } func (*ResourceRef) ProtoMessage() {} func (*ResourceRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{106} + return fileDescriptor_030104ce3b95bcac, []int{117} } func (m *ResourceRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3037,7 +3346,7 @@ var xxx_messageInfo_ResourceRef proto.InternalMessageInfo func (m *ResourceResult) Reset() { *m = ResourceResult{} } func (*ResourceResult) ProtoMessage() {} func (*ResourceResult) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{107} + return fileDescriptor_030104ce3b95bcac, []int{118} } func (m *ResourceResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3065,7 +3374,7 @@ var xxx_messageInfo_ResourceResult proto.InternalMessageInfo func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } func (*ResourceStatus) ProtoMessage() {} func (*ResourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{108} + return fileDescriptor_030104ce3b95bcac, []int{119} } func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3093,7 +3402,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{109} + return fileDescriptor_030104ce3b95bcac, []int{120} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3121,7 +3430,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m *RevisionHistory) Reset() { *m = RevisionHistory{} } func (*RevisionHistory) ProtoMessage() {} func (*RevisionHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{110} + return fileDescriptor_030104ce3b95bcac, []int{121} } func (m *RevisionHistory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3149,7 +3458,7 @@ var xxx_messageInfo_RevisionHistory proto.InternalMessageInfo func (m *RevisionMetadata) Reset() { *m = RevisionMetadata{} } func (*RevisionMetadata) ProtoMessage() {} func (*RevisionMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{111} + return fileDescriptor_030104ce3b95bcac, []int{122} } func (m *RevisionMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3177,7 +3486,7 @@ var xxx_messageInfo_RevisionMetadata proto.InternalMessageInfo func (m *SCMProviderGenerator) Reset() { *m = SCMProviderGenerator{} } func (*SCMProviderGenerator) ProtoMessage() {} func (*SCMProviderGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{112} + return fileDescriptor_030104ce3b95bcac, []int{123} } func (m *SCMProviderGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3202,10 +3511,38 @@ func (m *SCMProviderGenerator) XXX_DiscardUnknown() { var 
xxx_messageInfo_SCMProviderGenerator proto.InternalMessageInfo +func (m *SCMProviderGeneratorAWSCodeCommit) Reset() { *m = SCMProviderGeneratorAWSCodeCommit{} } +func (*SCMProviderGeneratorAWSCodeCommit) ProtoMessage() {} +func (*SCMProviderGeneratorAWSCodeCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{124} +} +func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Merge(src proto.Message) { + xxx_messageInfo_SCMProviderGeneratorAWSCodeCommit.Merge(m, src) +} +func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Size() int { + return m.Size() +} +func (m *SCMProviderGeneratorAWSCodeCommit) XXX_DiscardUnknown() { + xxx_messageInfo_SCMProviderGeneratorAWSCodeCommit.DiscardUnknown(m) +} + +var xxx_messageInfo_SCMProviderGeneratorAWSCodeCommit proto.InternalMessageInfo + func (m *SCMProviderGeneratorAzureDevOps) Reset() { *m = SCMProviderGeneratorAzureDevOps{} } func (*SCMProviderGeneratorAzureDevOps) ProtoMessage() {} func (*SCMProviderGeneratorAzureDevOps) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{113} + return fileDescriptor_030104ce3b95bcac, []int{125} } func (m *SCMProviderGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3233,7 +3570,7 @@ var xxx_messageInfo_SCMProviderGeneratorAzureDevOps proto.InternalMessageInfo func (m *SCMProviderGeneratorBitbucket) Reset() { *m = SCMProviderGeneratorBitbucket{} } func (*SCMProviderGeneratorBitbucket) ProtoMessage() {} func (*SCMProviderGeneratorBitbucket) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{114} + return fileDescriptor_030104ce3b95bcac, []int{126} } func (m *SCMProviderGeneratorBitbucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3261,7 +3598,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucket proto.InternalMessageInfo func (m *SCMProviderGeneratorBitbucketServer) Reset() { *m = SCMProviderGeneratorBitbucketServer{} } func (*SCMProviderGeneratorBitbucketServer) ProtoMessage() {} func (*SCMProviderGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{115} + return fileDescriptor_030104ce3b95bcac, []int{127} } func (m *SCMProviderGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3289,7 +3626,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucketServer proto.InternalMessageInf func (m *SCMProviderGeneratorFilter) Reset() { *m = SCMProviderGeneratorFilter{} } func (*SCMProviderGeneratorFilter) ProtoMessage() {} func (*SCMProviderGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{116} + return fileDescriptor_030104ce3b95bcac, []int{128} } func (m *SCMProviderGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3317,7 +3654,7 @@ var xxx_messageInfo_SCMProviderGeneratorFilter proto.InternalMessageInfo func (m *SCMProviderGeneratorGitea) Reset() { *m = SCMProviderGeneratorGitea{} } func (*SCMProviderGeneratorGitea) ProtoMessage() {} func (*SCMProviderGeneratorGitea) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{117} + return fileDescriptor_030104ce3b95bcac, []int{129} } func (m 
*SCMProviderGeneratorGitea) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3345,7 +3682,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitea proto.InternalMessageInfo func (m *SCMProviderGeneratorGithub) Reset() { *m = SCMProviderGeneratorGithub{} } func (*SCMProviderGeneratorGithub) ProtoMessage() {} func (*SCMProviderGeneratorGithub) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{118} + return fileDescriptor_030104ce3b95bcac, []int{130} } func (m *SCMProviderGeneratorGithub) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3373,7 +3710,7 @@ var xxx_messageInfo_SCMProviderGeneratorGithub proto.InternalMessageInfo func (m *SCMProviderGeneratorGitlab) Reset() { *m = SCMProviderGeneratorGitlab{} } func (*SCMProviderGeneratorGitlab) ProtoMessage() {} func (*SCMProviderGeneratorGitlab) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{119} + return fileDescriptor_030104ce3b95bcac, []int{131} } func (m *SCMProviderGeneratorGitlab) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3401,7 +3738,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitlab proto.InternalMessageInfo func (m *SecretRef) Reset() { *m = SecretRef{} } func (*SecretRef) ProtoMessage() {} func (*SecretRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{120} + return fileDescriptor_030104ce3b95bcac, []int{132} } func (m *SecretRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3429,7 +3766,7 @@ var xxx_messageInfo_SecretRef proto.InternalMessageInfo func (m *SignatureKey) Reset() { *m = SignatureKey{} } func (*SignatureKey) ProtoMessage() {} func (*SignatureKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{121} + return fileDescriptor_030104ce3b95bcac, []int{133} } func (m *SignatureKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3457,7 +3794,7 @@ var xxx_messageInfo_SignatureKey proto.InternalMessageInfo func (m *SyncOperation) Reset() { *m = SyncOperation{} } func (*SyncOperation) ProtoMessage() {} func (*SyncOperation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{122} + return fileDescriptor_030104ce3b95bcac, []int{134} } func (m *SyncOperation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3485,7 +3822,7 @@ var xxx_messageInfo_SyncOperation proto.InternalMessageInfo func (m *SyncOperationResource) Reset() { *m = SyncOperationResource{} } func (*SyncOperationResource) ProtoMessage() {} func (*SyncOperationResource) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{123} + return fileDescriptor_030104ce3b95bcac, []int{135} } func (m *SyncOperationResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3513,7 +3850,7 @@ var xxx_messageInfo_SyncOperationResource proto.InternalMessageInfo func (m *SyncOperationResult) Reset() { *m = SyncOperationResult{} } func (*SyncOperationResult) ProtoMessage() {} func (*SyncOperationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{124} + return fileDescriptor_030104ce3b95bcac, []int{136} } func (m *SyncOperationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3541,7 +3878,7 @@ var xxx_messageInfo_SyncOperationResult proto.InternalMessageInfo func (m *SyncPolicy) Reset() { *m = SyncPolicy{} } func (*SyncPolicy) ProtoMessage() {} func (*SyncPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{125} + return fileDescriptor_030104ce3b95bcac, 
[]int{137} } func (m *SyncPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3569,7 +3906,7 @@ var xxx_messageInfo_SyncPolicy proto.InternalMessageInfo func (m *SyncPolicyAutomated) Reset() { *m = SyncPolicyAutomated{} } func (*SyncPolicyAutomated) ProtoMessage() {} func (*SyncPolicyAutomated) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{126} + return fileDescriptor_030104ce3b95bcac, []int{138} } func (m *SyncPolicyAutomated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3597,7 +3934,7 @@ var xxx_messageInfo_SyncPolicyAutomated proto.InternalMessageInfo func (m *SyncStatus) Reset() { *m = SyncStatus{} } func (*SyncStatus) ProtoMessage() {} func (*SyncStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{127} + return fileDescriptor_030104ce3b95bcac, []int{139} } func (m *SyncStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3625,7 +3962,7 @@ var xxx_messageInfo_SyncStatus proto.InternalMessageInfo func (m *SyncStrategy) Reset() { *m = SyncStrategy{} } func (*SyncStrategy) ProtoMessage() {} func (*SyncStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{128} + return fileDescriptor_030104ce3b95bcac, []int{140} } func (m *SyncStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3653,7 +3990,7 @@ var xxx_messageInfo_SyncStrategy proto.InternalMessageInfo func (m *SyncStrategyApply) Reset() { *m = SyncStrategyApply{} } func (*SyncStrategyApply) ProtoMessage() {} func (*SyncStrategyApply) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{129} + return fileDescriptor_030104ce3b95bcac, []int{141} } func (m *SyncStrategyApply) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3681,7 +4018,7 @@ var xxx_messageInfo_SyncStrategyApply proto.InternalMessageInfo func (m *SyncStrategyHook) Reset() { *m = SyncStrategyHook{} } func (*SyncStrategyHook) ProtoMessage() {} func (*SyncStrategyHook) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{130} + return fileDescriptor_030104ce3b95bcac, []int{142} } func (m *SyncStrategyHook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3709,7 +4046,7 @@ var xxx_messageInfo_SyncStrategyHook proto.InternalMessageInfo func (m *SyncWindow) Reset() { *m = SyncWindow{} } func (*SyncWindow) ProtoMessage() {} func (*SyncWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{131} + return fileDescriptor_030104ce3b95bcac, []int{143} } func (m *SyncWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3737,7 +4074,7 @@ var xxx_messageInfo_SyncWindow proto.InternalMessageInfo func (m *TLSClientConfig) Reset() { *m = TLSClientConfig{} } func (*TLSClientConfig) ProtoMessage() {} func (*TLSClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{132} + return fileDescriptor_030104ce3b95bcac, []int{144} } func (m *TLSClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3762,6 +4099,34 @@ func (m *TLSClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_TLSClientConfig proto.InternalMessageInfo +func (m *TagFilter) Reset() { *m = TagFilter{} } +func (*TagFilter) ProtoMessage() {} +func (*TagFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{145} +} +func (m *TagFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagFilter.Merge(m, src) +} +func (m *TagFilter) XXX_Size() int { + return m.Size() +} +func (m *TagFilter) XXX_DiscardUnknown() { + xxx_messageInfo_TagFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_TagFilter proto.InternalMessageInfo + func init() { proto.RegisterType((*AWSAuthConfig)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.AWSAuthConfig") proto.RegisterType((*AppProject)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.AppProject") @@ -3774,6 +4139,7 @@ func init() { proto.RegisterType((*ApplicationDestination)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationDestination") proto.RegisterType((*ApplicationList)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationList") proto.RegisterType((*ApplicationMatchExpression)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationMatchExpression") + proto.RegisterType((*ApplicationPreservedFields)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationPreservedFields") proto.RegisterType((*ApplicationSet)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSet") proto.RegisterType((*ApplicationSetApplicationStatus)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetApplicationStatus") proto.RegisterType((*ApplicationSetCondition)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetCondition") @@ -3800,7 +4166,6 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSourceKustomize.CommonLabelsEntry") proto.RegisterType((*ApplicationSourcePlugin)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSourcePlugin") proto.RegisterType((*ApplicationSourcePluginParameter)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSourcePluginParameter") - proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSourcePluginParameter.MapEntry") proto.RegisterType((*ApplicationSpec)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSpec") proto.RegisterType((*ApplicationStatus)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationStatus") proto.RegisterType((*ApplicationSummary)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSummary") @@ -3808,6 +4173,8 @@ func init() { proto.RegisterType((*ApplicationWatchEvent)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationWatchEvent") proto.RegisterType((*Backoff)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Backoff") proto.RegisterType((*BasicAuthBitbucketServer)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.BasicAuthBitbucketServer") + proto.RegisterType((*BearerTokenBitbucketCloud)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.BearerTokenBitbucketCloud") + proto.RegisterType((*ChartDetails)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ChartDetails") proto.RegisterType((*Cluster)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Cluster") proto.RegisterMapType((map[string]string)(nil), 
"github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Cluster.AnnotationsEntry") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Cluster.LabelsEntry") @@ -3830,6 +4197,7 @@ func init() { proto.RegisterType((*GitDirectoryGeneratorItem)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.GitDirectoryGeneratorItem") proto.RegisterType((*GitFileGeneratorItem)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.GitFileGeneratorItem") proto.RegisterType((*GitGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.GitGenerator") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.GitGenerator.ValuesEntry") proto.RegisterType((*GnuPGPublicKey)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.GnuPGPublicKey") proto.RegisterType((*GnuPGPublicKeyList)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.GnuPGPublicKeyList") proto.RegisterType((*HealthStatus)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.HealthStatus") @@ -3845,6 +4213,7 @@ func init() { proto.RegisterType((*JsonnetVar)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.JsonnetVar") proto.RegisterType((*KnownTypeField)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KnownTypeField") proto.RegisterType((*KustomizeOptions)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeOptions") + proto.RegisterType((*KustomizeReplica)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeReplica") proto.RegisterType((*ListGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ListGenerator") proto.RegisterType((*ManagedNamespaceMetadata)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ManagedNamespaceMetadata") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ManagedNamespaceMetadata.AnnotationsEntry") @@ -3856,11 +4225,21 @@ func init() { proto.RegisterType((*Operation)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Operation") proto.RegisterType((*OperationInitiator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OperationInitiator") proto.RegisterType((*OperationState)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OperationState") + proto.RegisterType((*OptionalArray)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OptionalArray") + proto.RegisterType((*OptionalMap)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OptionalMap") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OptionalMap.MapEntry") proto.RegisterType((*OrphanedResourceKey)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OrphanedResourceKey") proto.RegisterType((*OrphanedResourcesMonitorSettings)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OrphanedResourcesMonitorSettings") proto.RegisterType((*OverrideIgnoreDiff)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.OverrideIgnoreDiff") + proto.RegisterType((*PluginConfigMapRef)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PluginConfigMapRef") + proto.RegisterType((*PluginGenerator)(nil), 
"github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PluginGenerator") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PluginGenerator.ValuesEntry") + proto.RegisterType((*PluginInput)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PluginInput") + proto.RegisterMapType((PluginParameters)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PluginInput.ParametersEntry") proto.RegisterType((*ProjectRole)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ProjectRole") proto.RegisterType((*PullRequestGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PullRequestGenerator") + proto.RegisterType((*PullRequestGeneratorAzureDevOps)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PullRequestGeneratorAzureDevOps") + proto.RegisterType((*PullRequestGeneratorBitbucket)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PullRequestGeneratorBitbucket") proto.RegisterType((*PullRequestGeneratorBitbucketServer)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PullRequestGeneratorBitbucketServer") proto.RegisterType((*PullRequestGeneratorFilter)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PullRequestGeneratorFilter") proto.RegisterType((*PullRequestGeneratorGitLab)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.PullRequestGeneratorGitLab") @@ -3891,6 +4270,8 @@ func init() { proto.RegisterType((*RevisionHistory)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.RevisionHistory") proto.RegisterType((*RevisionMetadata)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.RevisionMetadata") proto.RegisterType((*SCMProviderGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGenerator") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGenerator.ValuesEntry") + proto.RegisterType((*SCMProviderGeneratorAWSCodeCommit)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGeneratorAWSCodeCommit") proto.RegisterType((*SCMProviderGeneratorAzureDevOps)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGeneratorAzureDevOps") proto.RegisterType((*SCMProviderGeneratorBitbucket)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGeneratorBitbucket") proto.RegisterType((*SCMProviderGeneratorBitbucketServer)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGeneratorBitbucketServer") @@ -3911,6 +4292,7 @@ func init() { proto.RegisterType((*SyncStrategyHook)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncStrategyHook") proto.RegisterType((*SyncWindow)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncWindow") proto.RegisterType((*TLSClientConfig)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.TLSClientConfig") + proto.RegisterType((*TagFilter)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.TagFilter") } func init() { @@ -3918,611 +4300,671 @@ func init() { } var fileDescriptor_030104ce3b95bcac = []byte{ - // 9659 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x24, 0xc7, - 0x75, 0x98, 0x66, 0x17, 0x0b, 0xec, 0x3e, 0x7c, 0xdc, 0xa1, 0xef, 0x8e, 0x04, 0x4f, 
0xe4, 0xe1, - 0x6a, 0x58, 0xa6, 0xa9, 0x88, 0x04, 0xc2, 0x13, 0xa5, 0x30, 0xa6, 0x4d, 0x19, 0x0b, 0xdc, 0xe1, - 0x70, 0x07, 0x1c, 0xc0, 0x06, 0xee, 0x4e, 0x22, 0x4d, 0x49, 0x83, 0xd9, 0xde, 0xc5, 0x1c, 0x76, - 0x67, 0x86, 0x33, 0xb3, 0x38, 0x2c, 0x2d, 0xcb, 0x92, 0x2c, 0xd9, 0x4a, 0xf4, 0x41, 0x85, 0xfe, - 0x11, 0xb9, 0x92, 0x38, 0x8a, 0xed, 0x72, 0xc5, 0x95, 0xb0, 0xe2, 0x54, 0x7e, 0xe4, 0xab, 0x52, - 0x15, 0xdb, 0xf9, 0xc1, 0x94, 0x52, 0x15, 0x55, 0xc5, 0x65, 0x39, 0xb1, 0x03, 0x53, 0x97, 0x4a, - 0x25, 0x95, 0xaa, 0x38, 0x95, 0x8f, 0x3f, 0xb9, 0xca, 0x8f, 0x54, 0x7f, 0xf7, 0xcc, 0xee, 0x1e, - 0x76, 0x0f, 0x83, 0xbb, 0x93, 0x8a, 0xff, 0x76, 0xfb, 0xbd, 0x7e, 0xaf, 0xbb, 0xa7, 0xfb, 0xf5, - 0x7b, 0xdd, 0xef, 0xbd, 0x86, 0xd5, 0x86, 0x97, 0xec, 0xb4, 0xb7, 0xe7, 0xdc, 0xa0, 0x35, 0xef, - 0x44, 0x8d, 0x20, 0x8c, 0x82, 0x5b, 0xec, 0xc7, 0xf3, 0x6e, 0x6d, 0x7e, 0xef, 0xc2, 0x7c, 0xb8, - 0xdb, 0x98, 0x77, 0x42, 0x2f, 0x9e, 0x77, 0xc2, 0xb0, 0xe9, 0xb9, 0x4e, 0xe2, 0x05, 0xfe, 0xfc, - 0xde, 0x0b, 0x4e, 0x33, 0xdc, 0x71, 0x5e, 0x98, 0x6f, 0x10, 0x9f, 0x44, 0x4e, 0x42, 0x6a, 0x73, - 0x61, 0x14, 0x24, 0x01, 0xfa, 0x69, 0x4d, 0x6d, 0x4e, 0x52, 0x63, 0x3f, 0x3e, 0xeb, 0xd6, 0xe6, - 0xf6, 0x2e, 0xcc, 0x85, 0xbb, 0x8d, 0x39, 0x4a, 0x6d, 0xce, 0xa0, 0x36, 0x27, 0xa9, 0x9d, 0x7d, - 0xde, 0x68, 0x4b, 0x23, 0x68, 0x04, 0xf3, 0x8c, 0xe8, 0x76, 0xbb, 0xce, 0xfe, 0xb1, 0x3f, 0xec, - 0x17, 0x67, 0x76, 0xd6, 0xde, 0x7d, 0x29, 0x9e, 0xf3, 0x02, 0xda, 0xbc, 0x79, 0x37, 0x88, 0xc8, - 0xfc, 0x5e, 0x57, 0x83, 0xce, 0x5e, 0xd6, 0x38, 0x64, 0x3f, 0x21, 0x7e, 0xec, 0x05, 0x7e, 0xfc, - 0x3c, 0x6d, 0x02, 0x89, 0xf6, 0x48, 0x64, 0x76, 0xcf, 0x40, 0xe8, 0x45, 0xe9, 0x45, 0x4d, 0xa9, - 0xe5, 0xb8, 0x3b, 0x9e, 0x4f, 0xa2, 0x8e, 0xae, 0xde, 0x22, 0x89, 0xd3, 0xab, 0xd6, 0x7c, 0xbf, - 0x5a, 0x51, 0xdb, 0x4f, 0xbc, 0x16, 0xe9, 0xaa, 0xf0, 0x89, 0xc3, 0x2a, 0xc4, 0xee, 0x0e, 0x69, - 0x39, 0x5d, 0xf5, 0x3e, 0xd6, 0xaf, 0x5e, 0x3b, 0xf1, 0x9a, 0xf3, 0x9e, 0x9f, 0xc4, 0x49, 0x94, - 0xad, 0x64, 0xbf, 0x09, 0x93, 0x0b, 0x37, 0x37, 0x17, 0xda, 0xc9, 0xce, 0x62, 0xe0, 0xd7, 0xbd, - 0x06, 0xfa, 0x38, 0x8c, 0xbb, 0xcd, 0x76, 0x9c, 0x90, 0xe8, 0x9a, 0xd3, 0x22, 0x33, 0xd6, 0x79, - 0xeb, 0xd9, 0x4a, 0xf5, 0xd4, 0x7b, 0x07, 0xb3, 0x1f, 0xba, 0x73, 0x30, 0x3b, 0xbe, 0xa8, 0x41, - 0xd8, 0xc4, 0x43, 0x1f, 0x81, 0xb1, 0x28, 0x68, 0x92, 0x05, 0x7c, 0x6d, 0xa6, 0xc0, 0xaa, 0x9c, - 0x10, 0x55, 0xc6, 0x30, 0x2f, 0xc6, 0x12, 0x6e, 0xff, 0x51, 0x01, 0x60, 0x21, 0x0c, 0x37, 0xa2, - 0xe0, 0x16, 0x71, 0x13, 0xf4, 0x39, 0x28, 0xd3, 0xa1, 0xab, 0x39, 0x89, 0xc3, 0xb8, 0x8d, 0x5f, - 0xf8, 0x8b, 0x73, 0xbc, 0x27, 0x73, 0x66, 0x4f, 0xf4, 0xc4, 0xa1, 0xd8, 0x73, 0x7b, 0x2f, 0xcc, - 0xad, 0x6f, 0xd3, 0xfa, 0x6b, 0x24, 0x71, 0xaa, 0x48, 0x30, 0x03, 0x5d, 0x86, 0x15, 0x55, 0xe4, - 0xc3, 0x48, 0x1c, 0x12, 0x97, 0x35, 0x6c, 0xfc, 0xc2, 0xea, 0xdc, 0x51, 0x66, 0xe8, 0x9c, 0x6e, - 0xf9, 0x66, 0x48, 0xdc, 0xea, 0x84, 0xe0, 0x3c, 0x42, 0xff, 0x61, 0xc6, 0x07, 0xed, 0xc1, 0x68, - 0x9c, 0x38, 0x49, 0x3b, 0x9e, 0x29, 0x32, 0x8e, 0xd7, 0x72, 0xe3, 0xc8, 0xa8, 0x56, 0xa7, 0x04, - 0xcf, 0x51, 0xfe, 0x1f, 0x0b, 0x6e, 0xf6, 0x7f, 0xb4, 0x60, 0x4a, 0x23, 0xaf, 0x7a, 0x71, 0x82, - 0x7e, 0xae, 0x6b, 0x70, 0xe7, 0x06, 0x1b, 0x5c, 0x5a, 0x9b, 0x0d, 0xed, 0x49, 0xc1, 0xac, 0x2c, - 0x4b, 0x8c, 0x81, 0x6d, 0x41, 0xc9, 0x4b, 0x48, 0x2b, 0x9e, 0x29, 0x9c, 0x2f, 0x3e, 0x3b, 0x7e, - 0xe1, 0x72, 0x5e, 0xfd, 0xac, 0x4e, 0x0a, 0xa6, 0xa5, 0x15, 0x4a, 0x1e, 0x73, 0x2e, 0xf6, 0xef, - 0x4c, 0x98, 0xfd, 0xa3, 0x03, 0x8e, 0x5e, 0x80, 0xf1, 0x38, 0x68, 0x47, 0x2e, 0xc1, 0x24, 0x0c, - 0xe2, 0x19, 
0xeb, 0x7c, 0x91, 0x4e, 0x3d, 0x3a, 0x53, 0x37, 0x75, 0x31, 0x36, 0x71, 0xd0, 0xb7, - 0x2c, 0x98, 0xa8, 0x91, 0x38, 0xf1, 0x7c, 0xc6, 0x5f, 0x36, 0x7e, 0xeb, 0xc8, 0x8d, 0x97, 0x85, - 0x4b, 0x9a, 0x78, 0xf5, 0xb4, 0xe8, 0xc8, 0x84, 0x51, 0x18, 0xe3, 0x14, 0x7f, 0xba, 0xe2, 0x6a, - 0x24, 0x76, 0x23, 0x2f, 0xa4, 0xff, 0xd9, 0x9c, 0x31, 0x56, 0xdc, 0x92, 0x06, 0x61, 0x13, 0x0f, - 0xf9, 0x50, 0xa2, 0x2b, 0x2a, 0x9e, 0x19, 0x61, 0xed, 0x5f, 0x39, 0x5a, 0xfb, 0xc5, 0xa0, 0xd2, - 0xc5, 0xaa, 0x47, 0x9f, 0xfe, 0x8b, 0x31, 0x67, 0x83, 0xbe, 0x69, 0xc1, 0x8c, 0x58, 0xf1, 0x98, - 0xf0, 0x01, 0xbd, 0xb9, 0xe3, 0x25, 0xa4, 0xe9, 0xc5, 0xc9, 0x4c, 0x89, 0xb5, 0x61, 0x7e, 0xb0, - 0xb9, 0xb5, 0x1c, 0x05, 0xed, 0xf0, 0xaa, 0xe7, 0xd7, 0xaa, 0xe7, 0x05, 0xa7, 0x99, 0xc5, 0x3e, - 0x84, 0x71, 0x5f, 0x96, 0xe8, 0x57, 0x2d, 0x38, 0xeb, 0x3b, 0x2d, 0x12, 0x87, 0x0e, 0xfd, 0xb4, - 0x1c, 0x5c, 0x6d, 0x3a, 0xee, 0x2e, 0x6b, 0xd1, 0xe8, 0xfd, 0xb5, 0xc8, 0x16, 0x2d, 0x3a, 0x7b, - 0xad, 0x2f, 0x69, 0x7c, 0x0f, 0xb6, 0xe8, 0x37, 0x2d, 0x98, 0x0e, 0xa2, 0x70, 0xc7, 0xf1, 0x49, - 0x4d, 0x42, 0xe3, 0x99, 0x31, 0xb6, 0xf4, 0x3e, 0x73, 0xb4, 0x4f, 0xb4, 0x9e, 0x25, 0xbb, 0x16, - 0xf8, 0x5e, 0x12, 0x44, 0x9b, 0x24, 0x49, 0x3c, 0xbf, 0x11, 0x57, 0xcf, 0xdc, 0x39, 0x98, 0x9d, - 0xee, 0xc2, 0xc2, 0xdd, 0xed, 0x41, 0x3f, 0x0f, 0xe3, 0x71, 0xc7, 0x77, 0x6f, 0x7a, 0x7e, 0x2d, - 0xb8, 0x1d, 0xcf, 0x94, 0xf3, 0x58, 0xbe, 0x9b, 0x8a, 0xa0, 0x58, 0x80, 0x9a, 0x01, 0x36, 0xb9, - 0xf5, 0xfe, 0x70, 0x7a, 0x2a, 0x55, 0xf2, 0xfe, 0x70, 0x7a, 0x32, 0xdd, 0x83, 0x2d, 0xfa, 0x15, - 0x0b, 0x26, 0x63, 0xaf, 0xe1, 0x3b, 0x49, 0x3b, 0x22, 0x57, 0x49, 0x27, 0x9e, 0x01, 0xd6, 0x90, - 0x2b, 0x47, 0x1c, 0x15, 0x83, 0x64, 0xf5, 0x8c, 0x68, 0xe3, 0xa4, 0x59, 0x1a, 0xe3, 0x34, 0xdf, - 0x5e, 0x0b, 0x4d, 0x4f, 0xeb, 0xf1, 0x7c, 0x17, 0x9a, 0x9e, 0xd4, 0x7d, 0x59, 0xa2, 0x9f, 0x85, - 0x93, 0xbc, 0x48, 0x8d, 0x6c, 0x3c, 0x33, 0xc1, 0x04, 0xed, 0xe9, 0x3b, 0x07, 0xb3, 0x27, 0x37, - 0x33, 0x30, 0xdc, 0x85, 0x8d, 0xde, 0x84, 0xd9, 0x90, 0x44, 0x2d, 0x2f, 0x59, 0xf7, 0x9b, 0x1d, - 0x29, 0xbe, 0xdd, 0x20, 0x24, 0x35, 0xd1, 0x9c, 0x78, 0x66, 0xf2, 0xbc, 0xf5, 0x6c, 0xb9, 0xfa, - 0x93, 0xa2, 0x99, 0xb3, 0x1b, 0xf7, 0x46, 0xc7, 0x87, 0xd1, 0xb3, 0xff, 0x75, 0x01, 0x4e, 0x66, - 0x37, 0x4e, 0xf4, 0xdb, 0x16, 0x9c, 0xb8, 0x75, 0x3b, 0xd9, 0x0a, 0x76, 0x89, 0x1f, 0x57, 0x3b, - 0x54, 0xbc, 0xb1, 0x2d, 0x63, 0xfc, 0x82, 0x9b, 0xef, 0x16, 0x3d, 0x77, 0x25, 0xcd, 0xe5, 0xa2, - 0x9f, 0x44, 0x9d, 0xea, 0xe3, 0xa2, 0x77, 0x27, 0xae, 0xdc, 0xdc, 0x32, 0xa1, 0x38, 0xdb, 0xa8, - 0xb3, 0x5f, 0xb7, 0xe0, 0x74, 0x2f, 0x12, 0xe8, 0x24, 0x14, 0x77, 0x49, 0x87, 0x6b, 0x65, 0x98, - 0xfe, 0x44, 0x6f, 0x40, 0x69, 0xcf, 0x69, 0xb6, 0x89, 0xd0, 0x6e, 0x96, 0x8f, 0xd6, 0x11, 0xd5, - 0x32, 0xcc, 0xa9, 0xfe, 0x54, 0xe1, 0x25, 0xcb, 0xfe, 0xb7, 0x45, 0x18, 0x37, 0xf6, 0xb7, 0x07, - 0xa0, 0xb1, 0x05, 0x29, 0x8d, 0x6d, 0x2d, 0xb7, 0xad, 0xb9, 0xaf, 0xca, 0x76, 0x3b, 0xa3, 0xb2, - 0xad, 0xe7, 0xc7, 0xf2, 0x9e, 0x3a, 0x1b, 0x4a, 0xa0, 0x12, 0x84, 0x54, 0x23, 0xa7, 0x5b, 0xff, - 0x48, 0x1e, 0x9f, 0x70, 0x5d, 0x92, 0xab, 0x4e, 0xde, 0x39, 0x98, 0xad, 0xa8, 0xbf, 0x58, 0x33, - 0xb2, 0x7f, 0x60, 0xc1, 0x69, 0xa3, 0x8d, 0x8b, 0x81, 0x5f, 0xf3, 0xd8, 0xa7, 0x3d, 0x0f, 0x23, - 0x49, 0x27, 0x94, 0x6a, 0xbf, 0x1a, 0xa9, 0xad, 0x4e, 0x48, 0x30, 0x83, 0x50, 0x45, 0xbf, 0x45, - 0xe2, 0xd8, 0x69, 0x90, 0xac, 0xa2, 0xbf, 0xc6, 0x8b, 0xb1, 0x84, 0xa3, 0x08, 0x50, 0xd3, 0x89, - 0x93, 0xad, 0xc8, 0xf1, 0x63, 0x46, 0x7e, 0xcb, 0x6b, 0x11, 0x31, 0xc0, 0x7f, 0x61, 0xb0, 0x19, - 0x43, 0x6b, 0x54, 0x1f, 0xbb, 0x73, 
0x30, 0x8b, 0x56, 0xbb, 0x28, 0xe1, 0x1e, 0xd4, 0xed, 0x5f, - 0xb5, 0xe0, 0xb1, 0xde, 0xba, 0x18, 0x7a, 0x06, 0x46, 0xb9, 0xc9, 0x27, 0x7a, 0xa7, 0x3f, 0x09, - 0x2b, 0xc5, 0x02, 0x8a, 0xe6, 0xa1, 0xa2, 0xf6, 0x09, 0xd1, 0xc7, 0x69, 0x81, 0x5a, 0xd1, 0x9b, - 0x8b, 0xc6, 0xa1, 0x83, 0x46, 0xff, 0x08, 0xcd, 0x4d, 0x0d, 0x1a, 0x33, 0x92, 0x18, 0xc4, 0xfe, - 0x33, 0x0b, 0x4e, 0x18, 0xad, 0x7a, 0x00, 0xaa, 0xb9, 0x9f, 0x56, 0xcd, 0x57, 0x72, 0x9b, 0xcf, - 0x7d, 0x74, 0xf3, 0x6f, 0x5a, 0x70, 0xd6, 0xc0, 0x5a, 0x73, 0x12, 0x77, 0xe7, 0xe2, 0x7e, 0x18, - 0x91, 0x98, 0x9a, 0xd3, 0xe8, 0x29, 0x43, 0x6e, 0x55, 0xc7, 0x05, 0x85, 0xe2, 0x55, 0xd2, 0xe1, - 0x42, 0xec, 0x39, 0x28, 0xf3, 0xc9, 0x19, 0x44, 0x62, 0xc4, 0x55, 0xdf, 0xd6, 0x45, 0x39, 0x56, - 0x18, 0xc8, 0x86, 0x51, 0x26, 0x9c, 0xe8, 0x62, 0xa5, 0xdb, 0x10, 0xd0, 0x8f, 0x78, 0x83, 0x95, - 0x60, 0x01, 0xb1, 0xef, 0x14, 0x98, 0xad, 0xa0, 0x56, 0x21, 0x79, 0x10, 0x86, 0x66, 0x94, 0x12, - 0x5b, 0x1b, 0xf9, 0xc9, 0x10, 0xd2, 0xdf, 0xd8, 0x7c, 0x2b, 0x23, 0xb9, 0x70, 0xae, 0x5c, 0xef, - 0x6d, 0x70, 0xfe, 0x5e, 0x01, 0x66, 0xd3, 0x15, 0xba, 0x04, 0x1f, 0xb5, 0x6e, 0x0c, 0x46, 0xd9, - 0xf3, 0x04, 0x03, 0x1f, 0x9b, 0x78, 0x7d, 0x64, 0x47, 0xe1, 0x38, 0x65, 0x87, 0x29, 0xda, 0x8a, - 0x87, 0x88, 0xb6, 0x67, 0xd4, 0xa8, 0x8f, 0x64, 0x64, 0x49, 0x5a, 0xbc, 0x9f, 0x87, 0x91, 0x38, - 0x21, 0xe1, 0x4c, 0x29, 0x2d, 0x1a, 0x36, 0x13, 0x12, 0x62, 0x06, 0xb1, 0xff, 0x5b, 0x01, 0x1e, - 0x4f, 0x8f, 0xa1, 0x96, 0xc6, 0x9f, 0x4c, 0x49, 0xe3, 0x8f, 0x9a, 0xd2, 0xf8, 0xee, 0xc1, 0xec, - 0x87, 0xfb, 0x54, 0xfb, 0x91, 0x11, 0xd6, 0x68, 0x39, 0x33, 0x8a, 0xf3, 0xe9, 0x51, 0xbc, 0x7b, - 0x30, 0xfb, 0x54, 0x9f, 0x3e, 0x66, 0x86, 0xf9, 0x19, 0x18, 0x8d, 0x88, 0x13, 0x07, 0xbe, 0x18, - 0x68, 0xf5, 0x39, 0x30, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0x59, 0x39, 0x3b, 0xd8, 0xcb, 0xfc, 0x3c, - 0x2c, 0x88, 0x90, 0x07, 0x23, 0x4c, 0xc3, 0xe6, 0xa2, 0xe1, 0xea, 0xd1, 0x96, 0x11, 0x95, 0xc8, - 0x8a, 0x74, 0xb5, 0x4c, 0xbf, 0x1a, 0x2d, 0xc2, 0x8c, 0x05, 0xda, 0x87, 0xb2, 0x2b, 0x15, 0xdf, - 0x42, 0x1e, 0x47, 0x44, 0x42, 0xed, 0xd5, 0x1c, 0x27, 0xa8, 0xe8, 0x54, 0xda, 0xb2, 0xe2, 0x86, - 0x08, 0x14, 0x1b, 0x5e, 0x22, 0x3e, 0xeb, 0x11, 0x4d, 0x9b, 0x65, 0xcf, 0xe8, 0xe2, 0x18, 0x95, - 0xe7, 0xcb, 0x5e, 0x82, 0x29, 0x7d, 0xf4, 0x55, 0x0b, 0xc6, 0x63, 0xb7, 0xb5, 0x11, 0x05, 0x7b, - 0x5e, 0x8d, 0x44, 0x42, 0xb1, 0x39, 0xa2, 0x68, 0xda, 0x5c, 0x5c, 0x93, 0x04, 0x35, 0x5f, 0x6e, - 0x6a, 0x6a, 0x08, 0x36, 0xf9, 0x52, 0x85, 0xff, 0x71, 0xd1, 0xf7, 0x25, 0xe2, 0x7a, 0x74, 0x2b, - 0x92, 0xf6, 0x0d, 0x9b, 0x29, 0x47, 0x56, 0xf4, 0x96, 0xda, 0xee, 0x2e, 0x5d, 0x6f, 0xba, 0x41, - 0x1f, 0xbe, 0x73, 0x30, 0xfb, 0xf8, 0x62, 0x6f, 0x9e, 0xb8, 0x5f, 0x63, 0xd8, 0x80, 0x85, 0xed, - 0x66, 0x13, 0x93, 0x37, 0xdb, 0x84, 0x9d, 0x5e, 0xe4, 0x30, 0x60, 0x1b, 0x9a, 0x60, 0x66, 0xc0, - 0x0c, 0x08, 0x36, 0xf9, 0xa2, 0x37, 0x61, 0xb4, 0xe5, 0x24, 0x91, 0xb7, 0x2f, 0x8e, 0x2c, 0x8e, - 0xa8, 0x7a, 0xaf, 0x31, 0x5a, 0x9a, 0x39, 0xdb, 0xa9, 0x79, 0x21, 0x16, 0x8c, 0x50, 0x0b, 0x4a, - 0x2d, 0x12, 0x35, 0xc8, 0x4c, 0x39, 0x8f, 0xe3, 0xd9, 0x35, 0x4a, 0x4a, 0x33, 0xac, 0x50, 0x45, - 0x85, 0x95, 0x61, 0xce, 0x05, 0xbd, 0x01, 0xe5, 0x98, 0x34, 0x89, 0x4b, 0x55, 0x8d, 0x0a, 0xe3, - 0xf8, 0xb1, 0x01, 0xd5, 0x2e, 0x67, 0x9b, 0x34, 0x37, 0x45, 0x55, 0xbe, 0xc0, 0xe4, 0x3f, 0xac, - 0x48, 0xda, 0xff, 0xd9, 0x02, 0x94, 0x96, 0x30, 0x0f, 0x40, 0xd9, 0x7b, 0x33, 0xad, 0xec, 0xad, - 0xe6, 0xa9, 0x02, 0xf4, 0xd1, 0xf7, 0xde, 0x2b, 0x43, 0x46, 0x36, 0x5f, 0x23, 0x71, 0x42, 0x6a, - 0x1f, 0xc8, 0xd3, 0x0f, 0xe4, 0xe9, 0x07, 0xf2, 0x54, 0xc9, 
0xd3, 0xed, 0x8c, 0x3c, 0x7d, 0xc5, - 0x58, 0xf5, 0xfa, 0xb2, 0xf1, 0xb3, 0xea, 0x36, 0xd2, 0x6c, 0x81, 0x81, 0x40, 0x25, 0xc1, 0x95, - 0xcd, 0xf5, 0x6b, 0x3d, 0x05, 0xe8, 0x67, 0xd3, 0x02, 0xf4, 0xa8, 0x2c, 0x1e, 0xb8, 0xc8, 0xfc, - 0x1b, 0x05, 0x78, 0x22, 0x2d, 0x4a, 0x70, 0xd0, 0x6c, 0x06, 0xed, 0x84, 0x6a, 0xc9, 0xe8, 0xd7, - 0x2d, 0x38, 0xd9, 0x4a, 0x5b, 0x93, 0xb1, 0x38, 0xb4, 0xfb, 0x54, 0x6e, 0x72, 0x2e, 0x63, 0xae, - 0x56, 0x67, 0x84, 0xcc, 0x3b, 0x99, 0x01, 0xc4, 0xb8, 0xab, 0x2d, 0xe8, 0x0d, 0xa8, 0xb4, 0x9c, - 0xfd, 0xeb, 0x61, 0xcd, 0x49, 0xa4, 0x81, 0xd2, 0xdf, 0xae, 0x6c, 0x27, 0x5e, 0x73, 0x8e, 0x5f, - 0xc5, 0xce, 0xad, 0xf8, 0xc9, 0x7a, 0xb4, 0x99, 0x44, 0x9e, 0xdf, 0xe0, 0x47, 0x35, 0x6b, 0x92, - 0x0c, 0xd6, 0x14, 0xed, 0xbf, 0x65, 0x65, 0x05, 0xad, 0x1a, 0x9d, 0xc8, 0x49, 0x48, 0xa3, 0x83, - 0x3e, 0x0f, 0x25, 0x6a, 0x49, 0xc8, 0x51, 0xb9, 0x99, 0xa7, 0xf4, 0x37, 0xbe, 0x84, 0xde, 0x08, - 0xe8, 0xbf, 0x18, 0x73, 0xa6, 0xf6, 0x9d, 0x91, 0xec, 0x86, 0xc7, 0x2e, 0xe6, 0x2e, 0x00, 0x34, - 0x82, 0x2d, 0xd2, 0x0a, 0x9b, 0x74, 0x58, 0x2c, 0x76, 0xba, 0xab, 0x8c, 0xe7, 0x65, 0x05, 0xc1, - 0x06, 0x16, 0xfa, 0x2b, 0x16, 0x40, 0x43, 0x2e, 0x2c, 0xb9, 0x99, 0x5d, 0xcf, 0xb3, 0x3b, 0x7a, - 0xd9, 0xea, 0xb6, 0x28, 0x86, 0xd8, 0x60, 0x8e, 0xbe, 0x6c, 0x41, 0x39, 0x91, 0xcd, 0xe7, 0xe2, - 0x7d, 0x2b, 0xcf, 0x96, 0xc8, 0x4e, 0xeb, 0x7d, 0x5d, 0x0d, 0x89, 0xe2, 0x8b, 0x7e, 0xd9, 0x02, - 0x88, 0x3b, 0xbe, 0xbb, 0x11, 0x34, 0x3d, 0xb7, 0x23, 0xa4, 0xfe, 0x8d, 0x5c, 0x0d, 0x7c, 0x45, - 0xbd, 0x3a, 0x45, 0x47, 0x43, 0xff, 0xc7, 0x06, 0x67, 0xf4, 0x05, 0x28, 0xc7, 0x62, 0xba, 0x09, - 0x39, 0xbf, 0x95, 0xef, 0x31, 0x03, 0xa7, 0x2d, 0x44, 0x84, 0xf8, 0x87, 0x15, 0x4f, 0xfb, 0x7b, - 0x85, 0xd4, 0x79, 0xa5, 0x3a, 0x99, 0x60, 0x53, 0xc6, 0x95, 0x46, 0xa1, 0x5c, 0x01, 0xb9, 0x4e, - 0x19, 0x65, 0x72, 0xea, 0x29, 0xa3, 0x8a, 0x62, 0x6c, 0x30, 0xa7, 0x9b, 0xe3, 0xb4, 0x93, 0x3d, - 0xff, 0x10, 0xb3, 0xf8, 0x8d, 0x3c, 0x9b, 0xd4, 0x7d, 0xba, 0xfc, 0x84, 0x68, 0xda, 0x74, 0x17, - 0x08, 0x77, 0x37, 0xc9, 0xfe, 0x5e, 0xfa, 0x8c, 0xd4, 0xf8, 0x00, 0x03, 0x9c, 0xff, 0x7e, 0xcb, - 0x82, 0xf1, 0x28, 0x68, 0x36, 0x3d, 0xbf, 0x41, 0x27, 0x8b, 0x90, 0x78, 0xaf, 0x1f, 0x8b, 0xd0, - 0x11, 0xb3, 0x82, 0x6d, 0xb1, 0x58, 0xf3, 0xc4, 0x66, 0x03, 0xec, 0x2f, 0x59, 0x30, 0xd3, 0x6f, - 0x52, 0x23, 0x02, 0x1f, 0xa6, 0x92, 0x9a, 0x6e, 0x7c, 0xea, 0xf6, 0x73, 0xdd, 0x5f, 0x22, 0x4d, - 0xa2, 0x4e, 0xa3, 0xca, 0xd5, 0xa7, 0x45, 0x37, 0x3f, 0xbc, 0xd1, 0x1f, 0x15, 0xdf, 0x8b, 0x8e, - 0xfd, 0x5b, 0x85, 0xec, 0x88, 0x2a, 0xa1, 0xf6, 0x1d, 0xab, 0x4b, 0xf5, 0xff, 0xd4, 0x71, 0x08, - 0x12, 0x66, 0x24, 0xa8, 0x4b, 0xd0, 0xfe, 0x38, 0x0f, 0xf1, 0x96, 0xc5, 0xfe, 0x37, 0x23, 0x70, - 0x8f, 0x96, 0xa9, 0x73, 0x74, 0xab, 0xdf, 0x39, 0xfa, 0xf0, 0x47, 0xf3, 0xdf, 0xb0, 0x60, 0xb4, - 0x49, 0xb5, 0x10, 0x7e, 0x56, 0x3c, 0x7e, 0xa1, 0x76, 0x5c, 0x63, 0xcf, 0x95, 0x9d, 0x98, 0xdf, - 0xf4, 0xa9, 0xf3, 0x27, 0x5e, 0x88, 0x45, 0x1b, 0xd0, 0x77, 0x2d, 0x18, 0x77, 0x7c, 0x3f, 0x48, - 0x84, 0xeb, 0x09, 0x77, 0xdd, 0xf0, 0x8e, 0xad, 0x4d, 0x0b, 0x9a, 0x17, 0x6f, 0x98, 0x3e, 0x78, - 0xd5, 0x10, 0x6c, 0x36, 0x09, 0xcd, 0x01, 0xd4, 0x3d, 0xdf, 0x69, 0x7a, 0x6f, 0x51, 0x6b, 0xaa, - 0xc4, 0x0e, 0xd8, 0xd9, 0xd6, 0x70, 0x49, 0x95, 0x62, 0x03, 0xe3, 0xec, 0x5f, 0x86, 0x71, 0xa3, - 0xe7, 0x3d, 0x2e, 0x28, 0x4f, 0x9b, 0x17, 0x94, 0x15, 0xe3, 0x5e, 0xf1, 0xec, 0x2b, 0x70, 0x32, - 0xdb, 0xc0, 0x61, 0xea, 0xdb, 0xbf, 0x3d, 0x9a, 0x3d, 0x7e, 0xde, 0x22, 0x51, 0x8b, 0x36, 0xed, - 0x03, 0x2b, 0xf4, 0x03, 0x2b, 0xf4, 0x03, 0x2b, 0x54, 0xfe, 0xb1, 0xef, 0x94, 0x20, 
- [previous gzipped FileDescriptorProto bytes elided]
+ // 10611 bytes of a gzipped FileDescriptorProto
+ [regenerated gzipped FileDescriptorProto bytes elided]
0x80, 0xf4, 0x69, 0x9e, 0xe2, + 0x1f, 0x5d, 0x35, 0x63, 0x13, 0xc7, 0xfe, 0xad, 0x3a, 0x0c, 0x8b, 0x43, 0xf5, 0x81, 0x33, 0xcc, + 0xe5, 0xfe, 0xbe, 0xd4, 0x77, 0x7f, 0x9f, 0xc0, 0x90, 0xcb, 0xea, 0x71, 0x09, 0x33, 0xf2, 0x5a, + 0x21, 0x51, 0x18, 0xbc, 0xc4, 0x97, 0x16, 0x8b, 0xff, 0xc7, 0x82, 0x15, 0xfa, 0x92, 0x05, 0xa7, + 0xdc, 0x30, 0x08, 0x88, 0xab, 0x6d, 0x9c, 0x4a, 0x11, 0x87, 0xed, 0x33, 0x59, 0xa2, 0xfa, 0xc0, + 0x23, 0x07, 0xc0, 0x79, 0xf6, 0xe8, 0x65, 0x38, 0xc1, 0xfb, 0xec, 0x66, 0xc6, 0x11, 0xab, 0x0b, + 0xa9, 0x98, 0x40, 0x9c, 0xc5, 0x45, 0x93, 0xdc, 0xa1, 0x2d, 0x4a, 0x96, 0x0c, 0xe9, 0xd3, 0x33, + 0xa3, 0x58, 0x89, 0x81, 0x81, 0x62, 0x40, 0x31, 0x59, 0x8f, 0x49, 0xb2, 0x21, 0x82, 0x0e, 0x98, + 0x7d, 0x35, 0x7c, 0xb8, 0x8c, 0x55, 0xdc, 0x45, 0x09, 0xf7, 0xa0, 0x8e, 0x36, 0xc5, 0x06, 0xb3, + 0x56, 0x84, 0x0e, 0x15, 0x9f, 0xb9, 0xef, 0x3e, 0x73, 0x02, 0xaa, 0xc9, 0x86, 0x13, 0x37, 0x99, + 0x5d, 0x57, 0xe6, 0x59, 0x12, 0x2b, 0xb4, 0x01, 0xf3, 0x76, 0x34, 0x0b, 0xa7, 0x73, 0x65, 0x60, + 0x12, 0xe1, 0x30, 0x55, 0xa1, 0xfd, 0xb9, 0x02, 0x32, 0x09, 0xee, 0x7a, 0xc2, 0x74, 0x3e, 0x8c, + 0xec, 0xe3, 0x7c, 0xd8, 0x51, 0xa1, 0x6d, 0xa3, 0x6c, 0x7d, 0x7c, 0xa5, 0x90, 0x0e, 0x18, 0x28, + 0x8e, 0xed, 0xf3, 0xb9, 0x38, 0xb6, 0x13, 0x4c, 0x80, 0x9b, 0xc5, 0x08, 0x70, 0xf0, 0xa0, 0xb5, + 0x07, 0x19, 0x84, 0xf6, 0x3f, 0x2c, 0x90, 0xdf, 0x75, 0xc6, 0x71, 0x37, 0x08, 0x1d, 0x32, 0xe8, + 0xfd, 0x70, 0x52, 0x6d, 0xa1, 0x67, 0xc2, 0x4e, 0xc0, 0xe3, 0xcf, 0xca, 0xfa, 0x64, 0x14, 0x67, + 0xa0, 0x38, 0x87, 0x8d, 0xa6, 0xa0, 0x4e, 0xfb, 0x89, 0x3f, 0xca, 0xd7, 0x5a, 0xb5, 0x4d, 0x9f, + 0x5e, 0x9e, 0x17, 0x4f, 0x69, 0x1c, 0x14, 0xc2, 0x98, 0xef, 0x24, 0x29, 0x93, 0x80, 0xee, 0xa8, + 0x0f, 0x99, 0x2f, 0xce, 0xe2, 0xc7, 0x17, 0xf2, 0x84, 0x70, 0x37, 0x6d, 0xfb, 0xdb, 0x15, 0x38, + 0x91, 0xd1, 0x8c, 0x07, 0x5c, 0xa4, 0x9f, 0x85, 0x9a, 0x5c, 0x37, 0xf3, 0x55, 0x2b, 0xd4, 0xe2, + 0xaa, 0x30, 0xe8, 0xa2, 0xb5, 0xa6, 0x57, 0xd5, 0xbc, 0x51, 0x61, 0x2c, 0xb8, 0xd8, 0xc4, 0x63, + 0x4a, 0x39, 0xf5, 0x93, 0x19, 0xdf, 0x23, 0x41, 0xca, 0xc5, 0x2c, 0x46, 0x29, 0xaf, 0x2e, 0xac, + 0x98, 0x44, 0xb5, 0x52, 0xce, 0x01, 0x70, 0x9e, 0x3d, 0xfa, 0x69, 0x0b, 0x4e, 0x38, 0x77, 0x12, + 0x5d, 0x34, 0x52, 0x44, 0xac, 0x1d, 0x71, 0x91, 0xca, 0xd4, 0xa1, 0xe4, 0x2e, 0xdf, 0x4c, 0x13, + 0xce, 0x32, 0x45, 0x5f, 0xb5, 0x00, 0x91, 0x6d, 0xe2, 0xca, 0x98, 0x3a, 0x21, 0xcb, 0x50, 0x11, + 0x3b, 0xcd, 0x4b, 0x5d, 0x74, 0xb9, 0x56, 0xef, 0x6e, 0xc7, 0x3d, 0x64, 0xb0, 0xff, 0x71, 0x59, + 0x4d, 0x28, 0x1d, 0xc6, 0xe9, 0x18, 0xe1, 0x64, 0xd6, 0xe1, 0xc3, 0xc9, 0xf4, 0xb1, 0x7c, 0x77, + 0x1a, 0x5a, 0x26, 0xfd, 0xa6, 0xf4, 0x80, 0xd2, 0x6f, 0x7e, 0xca, 0xca, 0xd4, 0x67, 0x19, 0xb9, + 0xf8, 0x6a, 0xb1, 0x21, 0xa4, 0x93, 0x3c, 0x64, 0x20, 0xa7, 0xdd, 0xb3, 0x91, 0x22, 0x54, 0x9b, + 0x1a, 0x68, 0x07, 0xd2, 0x86, 0xff, 0xb6, 0x0c, 0x23, 0xc6, 0x4a, 0xda, 0xd3, 0x2c, 0xb2, 0x1e, + 0x32, 0xb3, 0xa8, 0x74, 0x00, 0xb3, 0xe8, 0x27, 0xa1, 0xee, 0x4a, 0x2d, 0x5f, 0x4c, 0x85, 0xd2, + 0xfc, 0xda, 0xa1, 0x15, 0xbd, 0x6a, 0xc2, 0x9a, 0x27, 0x9a, 0xcb, 0xe4, 0xaf, 0x88, 0x15, 0xa2, + 0xc2, 0x56, 0x88, 0x5e, 0x09, 0x26, 0x62, 0xa5, 0xe8, 0x7e, 0x86, 0x95, 0xf1, 0x89, 0x3c, 0xf1, + 0x5e, 0x32, 0xd0, 0x9b, 0x97, 0xf1, 0x59, 0x9e, 0x97, 0xcd, 0xd8, 0xc4, 0xb1, 0xbf, 0x6d, 0xa9, + 0x8f, 0x7b, 0x1f, 0x92, 0xda, 0x6f, 0x67, 0x93, 0xda, 0x2f, 0x15, 0xd2, 0xcd, 0x7d, 0xb2, 0xd9, + 0xaf, 0xc3, 0xf0, 0x4c, 0xd8, 0x6e, 0x3b, 0x41, 0x13, 0xfd, 0x20, 0x0c, 0xbb, 0xfc, 0xa7, 0x70, + 0xec, 0xb0, 0xe3, 0x41, 0x01, 0xc5, 0x12, 0x86, 0x1e, 0x87, 0x8a, 0x13, 0xb7, 0xa4, 0x33, 
0x87, + 0x45, 0x98, 0x4c, 0xc7, 0xad, 0x04, 0xb3, 0x56, 0xfb, 0xef, 0x57, 0x00, 0x66, 0xc2, 0x76, 0xe4, + 0xc4, 0xa4, 0xb9, 0x1a, 0xb2, 0x0a, 0x69, 0xc7, 0x7a, 0xa8, 0xa6, 0x37, 0x4b, 0x0f, 0xf3, 0xc1, + 0x9a, 0x71, 0xb8, 0x52, 0xbe, 0xcf, 0x87, 0x2b, 0x7d, 0xce, 0xcb, 0x2a, 0x0f, 0xd1, 0x79, 0x99, + 0xfd, 0x39, 0x0b, 0x10, 0x1d, 0x34, 0x61, 0x40, 0x82, 0x54, 0x1f, 0x68, 0x4f, 0x41, 0xdd, 0x95, + 0xad, 0xc2, 0xb0, 0xd2, 0x2a, 0x42, 0x02, 0xb0, 0xc6, 0x19, 0x60, 0x87, 0xfc, 0x94, 0xd4, 0xdf, + 0xe5, 0x6c, 0x70, 0x2a, 0xd3, 0xfa, 0x42, 0x9d, 0xdb, 0xbf, 0x5d, 0x82, 0x47, 0xf8, 0x92, 0xbc, + 0xe8, 0x04, 0x4e, 0x8b, 0xb4, 0xa9, 0x54, 0x83, 0x86, 0x28, 0xb8, 0x74, 0x6b, 0xe6, 0xc9, 0x60, + 0xd3, 0xa3, 0xce, 0x5d, 0x3e, 0xe7, 0xf8, 0x2c, 0x9b, 0x0f, 0xbc, 0x14, 0x33, 0xe2, 0x28, 0x81, + 0x9a, 0x2c, 0xc9, 0x2d, 0x74, 0x71, 0x41, 0x8c, 0x94, 0x5a, 0x12, 0xeb, 0x26, 0xc1, 0x8a, 0x11, + 0x35, 0x5c, 0xfd, 0xd0, 0xdd, 0xc4, 0x24, 0x0a, 0x99, 0xde, 0x35, 0x62, 0xfd, 0x16, 0x44, 0x3b, + 0x56, 0x18, 0xf6, 0x6f, 0x5b, 0x90, 0x5f, 0x91, 0x8c, 0x72, 0x55, 0xd6, 0x3d, 0xcb, 0x55, 0x1d, + 0xa0, 0x5e, 0xd4, 0x8f, 0xc3, 0x88, 0x93, 0x52, 0x23, 0x82, 0x6f, 0xbb, 0xcb, 0x87, 0x3b, 0xd6, + 0x58, 0x0c, 0x9b, 0xde, 0xba, 0xc7, 0xb6, 0xdb, 0x26, 0x39, 0xfb, 0xbf, 0x55, 0x60, 0xac, 0x2b, + 0x25, 0x02, 0xbd, 0x04, 0xa3, 0xae, 0x18, 0x1e, 0x91, 0x74, 0x68, 0xd5, 0xcd, 0xd8, 0x30, 0x0d, + 0xc3, 0x19, 0xcc, 0x01, 0x06, 0xe8, 0x3c, 0x9c, 0x89, 0xe9, 0x46, 0xbf, 0x43, 0xa6, 0xd7, 0x53, + 0x12, 0xaf, 0x10, 0x37, 0x0c, 0x9a, 0xbc, 0xa8, 0x5a, 0xb9, 0xf1, 0xe8, 0xde, 0xee, 0xc4, 0x19, + 0xdc, 0x0d, 0xc6, 0xbd, 0x9e, 0x41, 0x11, 0x9c, 0xf0, 0x4d, 0x1b, 0x50, 0x6c, 0x00, 0x0e, 0x65, + 0x3e, 0x2a, 0x1b, 0x21, 0xd3, 0x8c, 0xb3, 0x0c, 0xb2, 0x86, 0x64, 0xf5, 0x01, 0x19, 0x92, 0x9f, + 0xd6, 0x86, 0x24, 0x3f, 0x7f, 0xff, 0x70, 0xc1, 0x29, 0x31, 0xc7, 0x6d, 0x49, 0xbe, 0x02, 0x35, + 0x19, 0x9b, 0x34, 0x50, 0x4c, 0x8f, 0x49, 0xa7, 0x8f, 0x46, 0xbb, 0x5b, 0x82, 0x1e, 0x9b, 0x10, + 0x3a, 0xcf, 0xf4, 0x8a, 0x9f, 0x99, 0x67, 0x07, 0x5b, 0xf5, 0xd1, 0x36, 0x8f, 0xcb, 0xe2, 0x6b, + 0xdb, 0x87, 0x8a, 0xde, 0x44, 0xe9, 0x50, 0x2d, 0x95, 0x29, 0xa0, 0xc2, 0xb5, 0x2e, 0x02, 0x68, + 0x43, 0x4d, 0xc4, 0x81, 0xab, 0x63, 0x5f, 0x6d, 0xcf, 0x61, 0x03, 0x8b, 0xee, 0xa9, 0xbd, 0x20, + 0x49, 0x1d, 0xdf, 0xbf, 0xe2, 0x05, 0xa9, 0x70, 0x0e, 0xaa, 0x45, 0x7c, 0x5e, 0x83, 0xb0, 0x89, + 0x77, 0xfe, 0x7d, 0xc6, 0x77, 0x39, 0xc8, 0xf7, 0xdc, 0x80, 0xc7, 0xe6, 0xbc, 0x54, 0x65, 0x2f, + 0xa8, 0x71, 0x44, 0xed, 0x30, 0x95, 0x8d, 0x63, 0xf5, 0xcd, 0xc6, 0x31, 0xb2, 0x07, 0x4a, 0xd9, + 0x64, 0x87, 0x7c, 0xf6, 0x80, 0xfd, 0x12, 0x9c, 0x9d, 0xf3, 0xd2, 0xcb, 0x9e, 0x4f, 0x0e, 0xc8, + 0xc4, 0xfe, 0xcd, 0x21, 0x18, 0x35, 0xf3, 0xdf, 0x0e, 0x92, 0x50, 0xf4, 0x05, 0x6a, 0x6a, 0x89, + 0xb7, 0xf3, 0xd4, 0xa1, 0xd9, 0xad, 0x23, 0x27, 0xe3, 0xf5, 0xee, 0x31, 0xc3, 0xda, 0xd2, 0x3c, + 0xb1, 0x29, 0x00, 0xba, 0x03, 0xd5, 0x75, 0x16, 0xdd, 0x5e, 0x2e, 0x22, 0xb2, 0xa0, 0x57, 0x8f, + 0xea, 0x69, 0xc6, 0xe3, 0xe3, 0x39, 0x3f, 0xba, 0x42, 0xc6, 0xd9, 0x94, 0x29, 0x23, 0x22, 0x53, + 0x24, 0x4b, 0x29, 0x8c, 0x7e, 0xaa, 0xbe, 0x7a, 0x08, 0x55, 0x9f, 0x51, 0xbc, 0x43, 0x0f, 0x48, + 0xf1, 0xb2, 0x4c, 0x85, 0x74, 0x83, 0xd9, 0x6f, 0x22, 0x84, 0x7c, 0x98, 0x75, 0x82, 0x91, 0xa9, + 0x90, 0x01, 0xe3, 0x3c, 0x3e, 0xfa, 0x84, 0x52, 0xdd, 0xb5, 0x22, 0xfc, 0xaa, 0xe6, 0x88, 0x3e, + 0x6e, 0xad, 0xfd, 0xb9, 0x12, 0x9c, 0x9c, 0x0b, 0x3a, 0xcb, 0x73, 0xcb, 0x9d, 0x35, 0xdf, 0x73, + 0xaf, 0x91, 0x1d, 0xaa, 0x9a, 0x37, 0xc9, 0xce, 0xfc, 0xac, 0x98, 0x41, 0x6a, 0xcc, 0x5c, 0xa3, + 0x8d, 0x98, 0xc3, 
0xa8, 0x32, 0x5a, 0xf7, 0x82, 0x16, 0x89, 0xa3, 0xd8, 0x13, 0x2e, 0x4f, 0x43, + 0x19, 0x5d, 0xd6, 0x20, 0x6c, 0xe2, 0x51, 0xda, 0xe1, 0x9d, 0x80, 0xc4, 0x79, 0x43, 0x76, 0x89, + 0x36, 0x62, 0x0e, 0xa3, 0x48, 0x69, 0xdc, 0x49, 0x52, 0x31, 0x18, 0x15, 0xd2, 0x2a, 0x6d, 0xc4, + 0x1c, 0x46, 0x67, 0x7a, 0xd2, 0x59, 0x63, 0x81, 0x1b, 0xb9, 0x78, 0xf5, 0x15, 0xde, 0x8c, 0x25, + 0x9c, 0xa2, 0x6e, 0x92, 0x9d, 0x59, 0xba, 0xeb, 0xcd, 0xa5, 0xad, 0x5c, 0xe3, 0xcd, 0x58, 0xc2, + 0x59, 0x35, 0xb8, 0x6c, 0x77, 0x7c, 0xcf, 0x55, 0x83, 0xcb, 0x8a, 0xdf, 0x67, 0xff, 0xfc, 0xcb, + 0x16, 0x8c, 0x9a, 0xe1, 0x56, 0xa8, 0x95, 0xb3, 0x71, 0x97, 0xba, 0x8a, 0x89, 0xfe, 0x68, 0xaf, + 0x9b, 0x93, 0x5a, 0x5e, 0x1a, 0x46, 0xc9, 0x73, 0x24, 0x68, 0x79, 0x01, 0x61, 0xa7, 0xe8, 0x3c, + 0x4c, 0x2b, 0x13, 0xcb, 0x35, 0x13, 0x36, 0xc9, 0x21, 0x8c, 0x64, 0xfb, 0x16, 0x8c, 0x75, 0xe5, + 0x2a, 0x0d, 0x60, 0x5a, 0xec, 0x9b, 0x29, 0x6a, 0x63, 0x18, 0xa1, 0x84, 0x65, 0x69, 0x95, 0x19, + 0x18, 0xe3, 0x13, 0x89, 0x72, 0x5a, 0x71, 0x37, 0x48, 0x5b, 0xe5, 0x9f, 0x31, 0xff, 0xfa, 0xcd, + 0x3c, 0x10, 0x77, 0xe3, 0xdb, 0x9f, 0xb7, 0xe0, 0x44, 0x26, 0x7d, 0xac, 0x20, 0x23, 0x88, 0xcd, + 0xb4, 0x90, 0x45, 0xff, 0xb1, 0x10, 0xe8, 0x32, 0x5b, 0x4c, 0xf5, 0x4c, 0xd3, 0x20, 0x6c, 0xe2, + 0xd9, 0x5f, 0x2e, 0x41, 0x4d, 0x46, 0x50, 0x0c, 0x20, 0xca, 0x67, 0x2d, 0x38, 0xa1, 0xce, 0x34, + 0x98, 0xb3, 0xac, 0x54, 0x44, 0xac, 0x3f, 0x95, 0x40, 0x6d, 0xb7, 0x83, 0xf5, 0x50, 0x5b, 0xe4, + 0xd8, 0x64, 0x86, 0xb3, 0xbc, 0xd1, 0x4d, 0x80, 0x64, 0x27, 0x49, 0x49, 0xdb, 0x70, 0xdb, 0xd9, + 0xc6, 0x8c, 0x9b, 0x74, 0xc3, 0x98, 0xd0, 0xf9, 0x75, 0x3d, 0x6c, 0x92, 0x15, 0x85, 0xa9, 0x4d, + 0x28, 0xdd, 0x86, 0x0d, 0x4a, 0xf6, 0xdf, 0x2d, 0xc1, 0xe9, 0xbc, 0x48, 0xe8, 0xc3, 0x30, 0x2a, + 0xb9, 0x1b, 0xb7, 0x40, 0xc9, 0xb0, 0x91, 0x51, 0x6c, 0xc0, 0xee, 0xee, 0x4e, 0x4c, 0x74, 0xdf, + 0xc2, 0x35, 0x69, 0xa2, 0xe0, 0x0c, 0x31, 0x7e, 0xb0, 0x24, 0x4e, 0x40, 0x1b, 0x3b, 0xd3, 0x51, + 0x24, 0x4e, 0x87, 0x8c, 0x83, 0x25, 0x13, 0x8a, 0x73, 0xd8, 0x68, 0x19, 0xce, 0x1a, 0x2d, 0xd7, + 0x89, 0xd7, 0xda, 0x58, 0x0b, 0x63, 0xb9, 0xb3, 0x7a, 0x5c, 0x07, 0x76, 0x75, 0xe3, 0xe0, 0x9e, + 0x4f, 0xd2, 0xd5, 0xde, 0x75, 0x22, 0xc7, 0xf5, 0xd2, 0x1d, 0xe1, 0x87, 0x54, 0xba, 0x69, 0x46, + 0xb4, 0x63, 0x85, 0x61, 0x2f, 0x42, 0x65, 0xc0, 0x11, 0x34, 0x90, 0x45, 0xff, 0x0a, 0xd4, 0x28, + 0x39, 0x69, 0xde, 0x15, 0x41, 0x32, 0x84, 0x9a, 0xbc, 0xc8, 0x01, 0xd9, 0x50, 0xf6, 0x1c, 0x79, + 0x76, 0xa7, 0x5e, 0x6b, 0x3e, 0x49, 0x3a, 0x6c, 0x93, 0x4c, 0x81, 0xe8, 0x29, 0x28, 0x93, 0xed, + 0x28, 0x7f, 0x48, 0x77, 0x69, 0x3b, 0xf2, 0x62, 0x92, 0x50, 0x24, 0xb2, 0x1d, 0xa1, 0xf3, 0x50, + 0xf2, 0x9a, 0x62, 0x91, 0x02, 0x81, 0x53, 0x9a, 0x9f, 0xc5, 0x25, 0xaf, 0x69, 0x6f, 0x43, 0x5d, + 0xdd, 0x1c, 0x81, 0x36, 0xa5, 0xee, 0xb6, 0x8a, 0x08, 0x79, 0x92, 0x74, 0xfb, 0x68, 0xed, 0x0e, + 0x80, 0xce, 0xa3, 0x2b, 0x4a, 0xbf, 0x5c, 0x80, 0x8a, 0x1b, 0x8a, 0x1c, 0xdf, 0x9a, 0x26, 0xc3, + 0x94, 0x36, 0x83, 0xd8, 0xb7, 0xe0, 0xe4, 0xb5, 0x20, 0xbc, 0xc3, 0x4a, 0x63, 0xb3, 0x92, 0x56, + 0x94, 0xf0, 0x3a, 0xfd, 0x91, 0x37, 0x11, 0x18, 0x14, 0x73, 0x98, 0x2a, 0x7b, 0x54, 0xea, 0x57, + 0xf6, 0xc8, 0xfe, 0xa4, 0x05, 0xa7, 0x55, 0x36, 0x90, 0xd4, 0xc6, 0x2f, 0xc1, 0xe8, 0x5a, 0xc7, + 0xf3, 0x9b, 0xb2, 0x50, 0x56, 0xce, 0x4d, 0xd1, 0x30, 0x60, 0x38, 0x83, 0x49, 0x37, 0x55, 0x6b, + 0x5e, 0xe0, 0xc4, 0x3b, 0xcb, 0x5a, 0xfd, 0x2b, 0x8d, 0xd0, 0x50, 0x10, 0x6c, 0x60, 0xd9, 0x9f, + 0x35, 0x45, 0x10, 0xf9, 0x47, 0x03, 0xf4, 0xec, 0x0d, 0xa8, 0xba, 0xea, 0xac, 0xf7, 0x50, 0xc5, + 0xfc, 0x54, 0x7e, 0x39, 0xf3, 0xf7, 0x73, 
0x6a, 0xf6, 0x3f, 0x29, 0xc1, 0x89, 0x4c, 0xcd, 0x12, + 0xe4, 0x43, 0x8d, 0xf8, 0xcc, 0x95, 0x27, 0x87, 0xd8, 0x51, 0xcb, 0x45, 0xaa, 0x69, 0x71, 0x49, + 0xd0, 0xc5, 0x8a, 0xc3, 0xc3, 0x71, 0xa4, 0xf6, 0x12, 0x8c, 0x4a, 0x81, 0x3e, 0xe4, 0xb4, 0x7d, + 0x31, 0x0b, 0xd5, 0x00, 0xb8, 0x64, 0xc0, 0x70, 0x06, 0xd3, 0xfe, 0x9d, 0x32, 0x8c, 0x73, 0xdf, + 0x67, 0x53, 0x45, 0xbd, 0x2c, 0x4a, 0x2b, 0xeb, 0x2f, 0xe8, 0xca, 0x42, 0xbc, 0x23, 0xd7, 0x8e, + 0x5a, 0x9d, 0xb9, 0x37, 0xa3, 0x81, 0xe2, 0x31, 0x7e, 0x31, 0x17, 0x8f, 0xc1, 0x17, 0xdb, 0xd6, + 0x31, 0x49, 0xf4, 0xbd, 0x15, 0xa0, 0xf1, 0xb7, 0x4a, 0x70, 0x2a, 0x57, 0xfa, 0x1a, 0x7d, 0x31, + 0x5b, 0xf6, 0xd1, 0x2a, 0xc2, 0x43, 0x76, 0xcf, 0x6a, 0xc8, 0x07, 0x2b, 0xfe, 0xf8, 0x80, 0xa6, + 0x8a, 0xfd, 0x7b, 0x25, 0x38, 0x99, 0xad, 0xd9, 0xfd, 0x10, 0xf6, 0xd4, 0x7b, 0xa0, 0xce, 0xca, + 0xd2, 0xb2, 0x7b, 0xc6, 0xb8, 0x23, 0x8e, 0x97, 0x32, 0x95, 0x8d, 0x58, 0xc3, 0x1f, 0x8a, 0x9a, + 0x9a, 0xf6, 0xdf, 0xb6, 0xe0, 0x1c, 0x7f, 0xcb, 0xfc, 0x38, 0xfc, 0x8b, 0xbd, 0x7a, 0xf7, 0xb5, + 0x62, 0x05, 0xcc, 0x55, 0xc4, 0xda, 0xaf, 0x7f, 0xd9, 0xfd, 0x46, 0x42, 0xda, 0xec, 0x50, 0x78, + 0x08, 0x85, 0x3d, 0xd0, 0x60, 0xb0, 0x7f, 0xaf, 0x0c, 0xfa, 0x4a, 0x27, 0xe4, 0x89, 0xcc, 0xa6, + 0x42, 0x2a, 0x83, 0xad, 0xec, 0x04, 0xae, 0xbe, 0x3c, 0xaa, 0x96, 0x4b, 0x6c, 0xfa, 0x39, 0x0b, + 0x46, 0xbc, 0xc0, 0x4b, 0x3d, 0x87, 0x19, 0xcf, 0xc5, 0x5c, 0x49, 0xa3, 0xd8, 0xcd, 0x73, 0xca, + 0x61, 0x6c, 0x7a, 0x6f, 0x15, 0x33, 0x6c, 0x72, 0x46, 0x1f, 0x15, 0x21, 0x93, 0xe5, 0xc2, 0x72, + 0xf2, 0x6a, 0xb9, 0x38, 0xc9, 0x08, 0xaa, 0x31, 0x49, 0xe3, 0x82, 0x52, 0x59, 0x31, 0x25, 0xa5, + 0x8a, 0x4c, 0xea, 0xcb, 0x35, 0x69, 0x33, 0xe6, 0x8c, 0xec, 0x04, 0x50, 0x77, 0x5f, 0x1c, 0x30, + 0x1c, 0x6d, 0x0a, 0xea, 0x4e, 0x27, 0x0d, 0xdb, 0xb4, 0x9b, 0x84, 0x83, 0x59, 0x07, 0xdc, 0x49, + 0x00, 0xd6, 0x38, 0xf6, 0x17, 0xab, 0x90, 0x4b, 0x35, 0x42, 0xdb, 0xe6, 0x75, 0x64, 0x56, 0xb1, + 0xd7, 0x91, 0x29, 0x61, 0x7a, 0x5d, 0x49, 0x86, 0x5a, 0x50, 0x8d, 0x36, 0x9c, 0x44, 0xda, 0xc6, + 0xaf, 0xc8, 0x6e, 0x5a, 0xa6, 0x8d, 0x77, 0x77, 0x27, 0x7e, 0x6c, 0x30, 0x5f, 0x0b, 0x1d, 0xab, + 0x53, 0x3c, 0x73, 0x5f, 0xb3, 0x66, 0x34, 0x30, 0xa7, 0x7f, 0x90, 0x4b, 0x79, 0x3e, 0x25, 0x0a, + 0x09, 0x63, 0x92, 0x74, 0xfc, 0x54, 0x8c, 0x86, 0x57, 0x0a, 0x9c, 0x65, 0x9c, 0xb0, 0x4e, 0x92, + 0xe5, 0xff, 0xb1, 0xc1, 0x14, 0x7d, 0x18, 0xea, 0x49, 0xea, 0xc4, 0xe9, 0x21, 0xd3, 0xda, 0x54, + 0xa7, 0xaf, 0x48, 0x22, 0x58, 0xd3, 0x43, 0xaf, 0xb2, 0x42, 0x89, 0x5e, 0xb2, 0x71, 0xc8, 0x48, + 0x67, 0x59, 0x54, 0x51, 0x50, 0xc0, 0x06, 0x35, 0xba, 0xf5, 0x60, 0x63, 0x9b, 0x87, 0xf7, 0xd4, + 0xd8, 0xde, 0x52, 0xa9, 0x42, 0xac, 0x20, 0xd8, 0xc0, 0xb2, 0x7f, 0x08, 0xb2, 0x59, 0xde, 0x68, + 0x42, 0x26, 0x95, 0x73, 0xdf, 0x13, 0x8b, 0x58, 0xce, 0xe4, 0x7f, 0xff, 0xba, 0x05, 0x66, 0x2a, + 0x3a, 0x7a, 0x83, 0xe7, 0xbc, 0x5b, 0x45, 0x9c, 0x17, 0x18, 0x74, 0x27, 0x17, 0x9d, 0x28, 0x77, + 0x70, 0x25, 0x13, 0xdf, 0xcf, 0xbf, 0x0f, 0x6a, 0x12, 0x7a, 0x20, 0xa3, 0xee, 0x13, 0x70, 0x26, + 0x7f, 0x59, 0xab, 0xf0, 0x35, 0xb7, 0xe2, 0xb0, 0x13, 0xe5, 0x37, 0x92, 0xec, 0x32, 0x4f, 0xcc, + 0x61, 0x74, 0x3b, 0xb6, 0xe9, 0x05, 0xcd, 0xfc, 0x46, 0xf2, 0x9a, 0x17, 0x34, 0x31, 0x83, 0x0c, + 0x70, 0x29, 0xdd, 0x6f, 0x58, 0x70, 0x61, 0xbf, 0x3b, 0x65, 0xd1, 0xe3, 0x50, 0xb9, 0xe3, 0xc4, + 0xb2, 0x82, 0x2d, 0x53, 0x94, 0xb7, 0x9c, 0x38, 0xc0, 0xac, 0x15, 0xed, 0xc0, 0x10, 0x8f, 0x01, + 0x11, 0xd6, 0xfa, 0x2b, 0xc5, 0xde, 0x70, 0x7b, 0x8d, 0x18, 0xdb, 0x05, 0x1e, 0x7f, 0x82, 0x05, + 0x43, 0xfb, 0x3b, 0x16, 0xa0, 0xa5, 0x2d, 0x12, 0xc7, 0x5e, 0xd3, 
0x88, 0x5a, 0x41, 0x2f, 0xc0, + 0xe8, 0xed, 0x95, 0xa5, 0xeb, 0xcb, 0xa1, 0x17, 0xb0, 0xaa, 0x0f, 0x46, 0x62, 0xdb, 0x55, 0xa3, + 0x1d, 0x67, 0xb0, 0xd0, 0x0c, 0x8c, 0xdd, 0x7e, 0x83, 0x6e, 0x7e, 0xcd, 0x6a, 0xf9, 0x25, 0xed, + 0xee, 0xbc, 0xfa, 0x4a, 0x0e, 0x88, 0xbb, 0xf1, 0xd1, 0x12, 0x9c, 0x6b, 0xf3, 0xed, 0x06, 0x2f, + 0x72, 0xcd, 0xf7, 0x1e, 0x2a, 0x8d, 0xe4, 0xb1, 0xbd, 0xdd, 0x89, 0x73, 0x8b, 0xbd, 0x10, 0x70, + 0xef, 0xe7, 0xec, 0xf7, 0x01, 0xe2, 0xc1, 0x2a, 0x33, 0xbd, 0x22, 0x0f, 0xfa, 0xee, 0xc4, 0xed, + 0xaf, 0x55, 0xe1, 0x54, 0xae, 0xbe, 0x21, 0xdd, 0xea, 0x75, 0x87, 0x3a, 0x1c, 0x79, 0xfd, 0xee, + 0x16, 0x6f, 0xa0, 0xe0, 0x89, 0x00, 0xaa, 0x5e, 0x10, 0x75, 0xd2, 0x62, 0x32, 0xc7, 0xb8, 0x10, + 0xf3, 0x94, 0xa0, 0xe1, 0x24, 0xa2, 0x7f, 0x31, 0x67, 0x53, 0x64, 0x28, 0x46, 0xc6, 0x18, 0xaf, + 0x3c, 0x20, 0x77, 0xc0, 0xa7, 0x74, 0x60, 0x44, 0xb5, 0x88, 0x83, 0xfa, 0xdc, 0x60, 0x39, 0xee, + 0x03, 0xb6, 0x5f, 0x2b, 0xc1, 0x88, 0xf1, 0xd1, 0xd0, 0x2f, 0x65, 0x0b, 0xb5, 0x58, 0xc5, 0xbd, + 0x12, 0xa3, 0x3f, 0xa9, 0x4b, 0xb1, 0xf0, 0x57, 0x7a, 0xba, 0xbb, 0x46, 0xcb, 0xdd, 0xdd, 0x89, + 0xd3, 0xb9, 0x2a, 0x2c, 0x99, 0xba, 0x2d, 0xe7, 0x3f, 0x0e, 0xa7, 0x72, 0x64, 0x7a, 0xbc, 0xf2, + 0x6a, 0xf6, 0x2e, 0xde, 0x23, 0xba, 0xa5, 0xcc, 0x2e, 0x7b, 0x8b, 0x76, 0x99, 0xbe, 0xa2, 0x7d, + 0x00, 0x77, 0x5c, 0x2e, 0x47, 0xae, 0x34, 0x60, 0x8e, 0xdc, 0x33, 0x50, 0x8b, 0x42, 0xdf, 0x73, + 0x3d, 0x55, 0xd2, 0x8b, 0x65, 0xe5, 0x2d, 0x8b, 0x36, 0xac, 0xa0, 0xe8, 0x0e, 0xd4, 0xd5, 0xb5, + 0xc5, 0x22, 0x08, 0xb1, 0x28, 0x57, 0xaf, 0x32, 0x5a, 0xf4, 0x75, 0xc4, 0x9a, 0x17, 0xb2, 0x61, + 0x88, 0x2d, 0x82, 0x32, 0xe0, 0x97, 0x65, 0x70, 0xb2, 0xd5, 0x31, 0xc1, 0x02, 0x62, 0x7f, 0xa3, + 0x0e, 0x67, 0x7b, 0x15, 0x99, 0x45, 0x1f, 0x83, 0x21, 0x2e, 0x63, 0x31, 0x75, 0xcc, 0x7b, 0xf1, + 0x98, 0x63, 0x04, 0x85, 0x58, 0xec, 0x37, 0x16, 0x3c, 0x05, 0x77, 0xdf, 0x59, 0x13, 0x23, 0xe4, + 0x78, 0xb8, 0x2f, 0x38, 0x9a, 0xfb, 0x82, 0xc3, 0xb9, 0xfb, 0xce, 0x1a, 0xda, 0x86, 0x6a, 0xcb, + 0x4b, 0x89, 0x23, 0x9c, 0x08, 0xb7, 0x8e, 0x85, 0x39, 0x71, 0xb8, 0x95, 0xc6, 0x7e, 0x62, 0xce, + 0x10, 0x7d, 0xdd, 0x82, 0x53, 0x6b, 0xd9, 0x84, 0x58, 0xa1, 0x3c, 0x9d, 0x63, 0x28, 0x24, 0x9c, + 0x65, 0xc4, 0x6f, 0xa4, 0xc8, 0x35, 0xe2, 0xbc, 0x38, 0xe8, 0xd3, 0x16, 0x0c, 0xaf, 0x7b, 0xbe, + 0x51, 0x53, 0xf2, 0x18, 0x3e, 0xce, 0x65, 0xc6, 0x40, 0xef, 0x38, 0xf8, 0xff, 0x04, 0x4b, 0xce, + 0xfd, 0x56, 0xaa, 0xa1, 0xa3, 0xae, 0x54, 0xc3, 0x0f, 0x68, 0xa5, 0xfa, 0x8c, 0x05, 0x75, 0xd5, + 0xd3, 0x22, 0xc9, 0xf1, 0xc3, 0xc7, 0xf8, 0xc9, 0xb9, 0xe7, 0x44, 0xfd, 0xc5, 0x9a, 0x39, 0xfa, + 0x92, 0x05, 0x23, 0xce, 0x9b, 0x9d, 0x98, 0x34, 0xc9, 0x56, 0x18, 0x25, 0xe2, 0x16, 0xa8, 0xd7, + 0x8a, 0x17, 0x66, 0x9a, 0x32, 0x99, 0x25, 0x5b, 0x4b, 0x51, 0x22, 0x92, 0x11, 0x74, 0x03, 0x36, + 0x45, 0xb0, 0x77, 0x4b, 0x30, 0xb1, 0x0f, 0x05, 0xf4, 0x12, 0x8c, 0x86, 0x71, 0xcb, 0x09, 0xbc, + 0x37, 0xcd, 0x0c, 0x77, 0x65, 0x65, 0x2d, 0x19, 0x30, 0x9c, 0xc1, 0x34, 0xd3, 0x30, 0x4b, 0xfb, + 0xa4, 0x61, 0x5e, 0x80, 0x4a, 0x4c, 0xa2, 0x30, 0xbf, 0x59, 0x60, 0x81, 0xc0, 0x0c, 0x82, 0x9e, + 0x80, 0xb2, 0x13, 0x79, 0x22, 0xfc, 0x44, 0xed, 0x81, 0xa6, 0x97, 0xe7, 0x31, 0x6d, 0xcf, 0x64, + 0x85, 0x57, 0xef, 0x4b, 0x56, 0x38, 0x5d, 0x06, 0xc4, 0xd9, 0xc5, 0x90, 0x5e, 0x06, 0xb2, 0x67, + 0x0a, 0xf6, 0x57, 0xcb, 0xf0, 0xc4, 0x3d, 0xc7, 0x8b, 0x8e, 0xbe, 0xb1, 0xee, 0x11, 0x7d, 0x23, + 0xbb, 0xa7, 0xb4, 0x5f, 0xf7, 0x94, 0xfb, 0x74, 0xcf, 0xa7, 0xe9, 0x34, 0x90, 0x95, 0x01, 0x8a, + 0xb9, 0x90, 0xa8, 0x5f, 0xa1, 0x01, 0x31, 0x03, 0x24, 0x14, 0x6b, 0xbe, 0x74, 0x0f, 0x90, 
0x49, + 0x41, 0xac, 0x16, 0xb1, 0x0c, 0xf4, 0xad, 0x14, 0xc0, 0xc7, 0x7e, 0xbf, 0xbc, 0x46, 0xfb, 0xe7, + 0x4b, 0xf0, 0xd4, 0x00, 0xda, 0xdb, 0x1c, 0xc5, 0xd6, 0x80, 0xa3, 0xf8, 0x7b, 0xfb, 0x33, 0xd9, + 0x7f, 0xc9, 0x82, 0xf3, 0xfd, 0x17, 0x0f, 0xf4, 0x3c, 0x8c, 0xac, 0xc5, 0x4e, 0xe0, 0x6e, 0xb0, + 0x4b, 0xd6, 0x64, 0xa7, 0xb0, 0xbe, 0xd6, 0xcd, 0xd8, 0xc4, 0xa1, 0xdb, 0x5b, 0x5e, 0xd8, 0xdd, + 0xc0, 0x90, 0x29, 0x63, 0x74, 0x7b, 0xbb, 0x9a, 0x07, 0xe2, 0x6e, 0x7c, 0xfb, 0x4f, 0x4a, 0xbd, + 0xc5, 0xe2, 0x46, 0xc6, 0x41, 0xbe, 0x93, 0xf8, 0x0a, 0xa5, 0x01, 0x74, 0x49, 0xf9, 0x7e, 0xeb, + 0x92, 0x4a, 0x3f, 0x5d, 0x82, 0x66, 0xe1, 0xb4, 0x71, 0x1f, 0x01, 0x4f, 0x03, 0xe4, 0x61, 0x76, + 0x2a, 0x37, 0x7e, 0x39, 0x07, 0xc7, 0x5d, 0x4f, 0xa0, 0x67, 0xa1, 0xe6, 0x05, 0x09, 0x71, 0x3b, + 0x31, 0x0f, 0xef, 0x34, 0x52, 0x2f, 0xe6, 0x45, 0x3b, 0x56, 0x18, 0xf6, 0x2f, 0x97, 0xe0, 0xb1, + 0xbe, 0x76, 0xd6, 0x7d, 0xd2, 0x5d, 0xe6, 0xe7, 0xa8, 0xdc, 0x9f, 0xcf, 0x61, 0x76, 0x52, 0x75, + 0xdf, 0x4e, 0xfa, 0xfd, 0xfe, 0x03, 0x93, 0xda, 0xdc, 0xdf, 0xb7, 0xbd, 0xf4, 0x32, 0x9c, 0x70, + 0xa2, 0x88, 0xe3, 0xb1, 0x28, 0xad, 0x5c, 0x6d, 0x8c, 0x69, 0x13, 0x88, 0xb3, 0xb8, 0x03, 0xad, + 0x9e, 0x7f, 0x68, 0x41, 0x1d, 0x93, 0x75, 0xae, 0x1d, 0xd0, 0x6d, 0xd1, 0x45, 0x56, 0x11, 0x55, + 0xf4, 0x68, 0xc7, 0x26, 0x1e, 0xab, 0x2e, 0xd7, 0xab, 0xb3, 0xbb, 0xef, 0xad, 0x28, 0x1d, 0xe8, + 0xde, 0x0a, 0x75, 0x73, 0x41, 0xb9, 0xff, 0xcd, 0x05, 0xf6, 0x5b, 0xc3, 0xf4, 0xf5, 0xa2, 0x70, + 0x26, 0x26, 0xcd, 0x84, 0x7e, 0xdf, 0x4e, 0xec, 0x8b, 0x41, 0xa2, 0xbe, 0xef, 0x0d, 0xbc, 0x80, + 0x69, 0x7b, 0xe6, 0x28, 0xa6, 0x74, 0xa0, 0xca, 0x00, 0xe5, 0x7d, 0x2b, 0x03, 0xbc, 0x0c, 0x27, + 0x92, 0x64, 0x63, 0x39, 0xf6, 0xb6, 0x9c, 0x94, 0x5c, 0x23, 0x3b, 0xc2, 0xca, 0xd2, 0xd9, 0xbc, + 0x2b, 0x57, 0x34, 0x10, 0x67, 0x71, 0xd1, 0x1c, 0x8c, 0xe9, 0xfc, 0x7c, 0x12, 0xa7, 0x2c, 0xa6, + 0x97, 0x8f, 0x04, 0x95, 0xba, 0xa7, 0x33, 0xfa, 0x05, 0x02, 0xee, 0x7e, 0x86, 0xea, 0xb7, 0x4c, + 0x23, 0x15, 0x64, 0x28, 0xab, 0xdf, 0x32, 0x74, 0xa8, 0x2c, 0x5d, 0x4f, 0xa0, 0x45, 0x38, 0xc3, + 0x07, 0xc6, 0x74, 0x14, 0x19, 0x6f, 0x34, 0x9c, 0xad, 0x5e, 0x36, 0xd7, 0x8d, 0x82, 0x7b, 0x3d, + 0x87, 0x5e, 0x84, 0x11, 0xd5, 0x3c, 0x3f, 0x2b, 0x4e, 0x11, 0x94, 0x17, 0x43, 0x91, 0x99, 0x6f, + 0x62, 0x13, 0x0f, 0x7d, 0x08, 0x1e, 0xd5, 0x7f, 0x79, 0xe2, 0x07, 0x3f, 0x5a, 0x9b, 0x15, 0xa5, + 0x4f, 0x54, 0x9d, 0xfc, 0xb9, 0x9e, 0x68, 0x4d, 0xdc, 0xef, 0x79, 0xb4, 0x06, 0xe7, 0x15, 0xe8, + 0x52, 0x90, 0xb2, 0x28, 0xee, 0x84, 0x34, 0x9c, 0x84, 0xdc, 0x88, 0x7d, 0x56, 0x2c, 0xa5, 0xae, + 0xaf, 0x30, 0x9b, 0xf3, 0xd2, 0x2b, 0xbd, 0x30, 0xf1, 0x02, 0xbe, 0x07, 0x15, 0x34, 0x05, 0x75, + 0x12, 0x38, 0x6b, 0x3e, 0x59, 0x9a, 0x99, 0x67, 0x25, 0x54, 0x8c, 0x93, 0xbc, 0x4b, 0x12, 0x80, + 0x35, 0x8e, 0x8a, 0x2b, 0x1b, 0xed, 0x7b, 0x9d, 0xde, 0x32, 0x9c, 0x6d, 0xb9, 0x11, 0xb5, 0x3d, + 0x3c, 0x97, 0x4c, 0xbb, 0x2c, 0xb6, 0x8a, 0x7e, 0x18, 0x5e, 0x56, 0x4e, 0x05, 0x4d, 0xce, 0xcd, + 0x2c, 0x77, 0xe1, 0xe0, 0x9e, 0x4f, 0xd2, 0x39, 0x16, 0xc5, 0xe1, 0xf6, 0xce, 0xf8, 0x99, 0xec, + 0x1c, 0x5b, 0xa6, 0x8d, 0x98, 0xc3, 0xd0, 0x55, 0x40, 0x2c, 0x02, 0xf7, 0x4a, 0x9a, 0x46, 0xca, + 0xd8, 0x19, 0x3f, 0xcb, 0x5e, 0xe9, 0xbc, 0x78, 0x02, 0x5d, 0xee, 0xc2, 0xc0, 0x3d, 0x9e, 0xb2, + 0xff, 0x9d, 0x05, 0x27, 0xd4, 0x7c, 0xbd, 0x0f, 0x31, 0xe8, 0x7e, 0x36, 0x06, 0x7d, 0xee, 0xe8, + 0x1a, 0x8f, 0x49, 0xde, 0x27, 0x90, 0xf1, 0x67, 0x46, 0x00, 0xb4, 0x56, 0x54, 0x0b, 0x92, 0xd5, + 0x77, 0x41, 0x7a, 0x68, 0x35, 0x52, 0xaf, 0x7a, 0x09, 0xd5, 0x07, 0x5b, 0x2f, 0x61, 0x05, 0xce, + 0x49, 0x73, 0x81, 
0x9f, 0x15, 0x5d, 0x09, 0x13, 0xa5, 0xe0, 0x6a, 0x8d, 0x27, 0x04, 0xa1, 0x73, + 0xf3, 0xbd, 0x90, 0x70, 0xef, 0x67, 0x33, 0x56, 0xca, 0xf0, 0x7e, 0x56, 0x8a, 0x9e, 0xd3, 0x0b, + 0xeb, 0xb2, 0x20, 0x7e, 0x6e, 0x4e, 0x2f, 0x5c, 0x5e, 0xc1, 0x1a, 0xa7, 0xb7, 0x62, 0xaf, 0x17, + 0xa4, 0xd8, 0xe1, 0xc0, 0x8a, 0x5d, 0xaa, 0x98, 0x91, 0xbe, 0x2a, 0x46, 0xfa, 0xa4, 0x47, 0xfb, + 0xfa, 0xa4, 0xdf, 0x0f, 0x27, 0xbd, 0x60, 0x83, 0xc4, 0x5e, 0x4a, 0x9a, 0x6c, 0x2e, 0x30, 0xf5, + 0x53, 0xd3, 0xcb, 0xfa, 0x7c, 0x06, 0x8a, 0x73, 0xd8, 0x59, 0xbd, 0x78, 0x72, 0x00, 0xbd, 0xd8, + 0x67, 0x35, 0x3a, 0x55, 0xcc, 0x6a, 0x74, 0xfa, 0xe8, 0xab, 0xd1, 0xd8, 0xb1, 0xae, 0x46, 0xa8, + 0x90, 0xd5, 0x68, 0x20, 0x45, 0x6f, 0x6c, 0xff, 0xce, 0xee, 0xb3, 0xfd, 0xeb, 0xb7, 0x14, 0x9d, + 0x3b, 0xf4, 0x52, 0xd4, 0x7b, 0x95, 0x79, 0xe4, 0x50, 0xab, 0xcc, 0x67, 0x4a, 0x70, 0x4e, 0xeb, + 0x61, 0x3a, 0xfa, 0xbd, 0x75, 0xaa, 0x89, 0xd8, 0x9d, 0x2a, 0xfc, 0xdc, 0xc6, 0x48, 0x89, 0xd0, + 0xd9, 0x15, 0x0a, 0x82, 0x0d, 0x2c, 0x96, 0x59, 0x40, 0x62, 0x56, 0x3c, 0x33, 0xaf, 0xa4, 0x67, + 0x44, 0x3b, 0x56, 0x18, 0x74, 0x7c, 0xd1, 0xdf, 0x22, 0x5b, 0x2b, 0x5f, 0x22, 0x6a, 0x46, 0x83, + 0xb0, 0x89, 0x87, 0x9e, 0xe1, 0x4c, 0x98, 0x82, 0xa0, 0x8a, 0x7a, 0x54, 0x5c, 0xb2, 0x28, 0x75, + 0x82, 0x82, 0x4a, 0x71, 0x58, 0x0a, 0x49, 0xb5, 0x5b, 0x1c, 0x16, 0x02, 0xa5, 0x30, 0xec, 0xff, + 0x6e, 0xc1, 0x63, 0x3d, 0xbb, 0xe2, 0x3e, 0x2c, 0xbe, 0xdb, 0xd9, 0xc5, 0x77, 0xa5, 0xa8, 0xed, + 0x86, 0xf1, 0x16, 0x7d, 0x16, 0xe2, 0x7f, 0x63, 0xc1, 0x49, 0x8d, 0x7f, 0x1f, 0x5e, 0xd5, 0xcb, + 0xbe, 0x6a, 0x71, 0x3b, 0xab, 0x7a, 0xd7, 0xbb, 0xfd, 0x4e, 0x09, 0x54, 0xd9, 0xb6, 0x69, 0x57, + 0x16, 0xc5, 0xdc, 0xe7, 0x24, 0x71, 0x07, 0x86, 0xd8, 0x41, 0x68, 0x52, 0x4c, 0x90, 0x47, 0x96, + 0x3f, 0x3b, 0x54, 0xd5, 0x87, 0xcc, 0xec, 0x6f, 0x82, 0x05, 0x43, 0x56, 0xda, 0xd5, 0x4b, 0xa8, + 0x36, 0x6f, 0x8a, 0x64, 0x0c, 0x5d, 0xda, 0x55, 0xb4, 0x63, 0x85, 0x41, 0x97, 0x07, 0xcf, 0x0d, + 0x83, 0x19, 0xdf, 0x49, 0xe4, 0x45, 0x62, 0x6a, 0x79, 0x98, 0x97, 0x00, 0xac, 0x71, 0xd8, 0x19, + 0xa9, 0x97, 0x44, 0xbe, 0xb3, 0x63, 0xec, 0x9f, 0x8d, 0xac, 0x64, 0x05, 0xc2, 0x26, 0x9e, 0xdd, + 0x86, 0xf1, 0xec, 0x4b, 0xcc, 0x92, 0x75, 0x16, 0xa0, 0x38, 0x50, 0x77, 0x4e, 0x41, 0xdd, 0x61, + 0x4f, 0x2d, 0x74, 0x9c, 0xfc, 0xfd, 0xbf, 0xd3, 0x12, 0x80, 0x35, 0x8e, 0xfd, 0xab, 0x16, 0x9c, + 0xe9, 0xd1, 0x69, 0x05, 0x26, 0xbb, 0xa4, 0x5a, 0xdb, 0xf4, 0x5a, 0xd8, 0xdf, 0x0d, 0xc3, 0x4d, + 0xb2, 0xee, 0xc8, 0x10, 0x38, 0x43, 0xb7, 0xcf, 0xf2, 0x66, 0x2c, 0xe1, 0xf6, 0x7f, 0xb5, 0xe0, + 0x54, 0x56, 0xd6, 0x84, 0x6a, 0x67, 0xfe, 0x32, 0xb3, 0x5e, 0xe2, 0x86, 0x5b, 0x24, 0xde, 0xa1, + 0x6f, 0xce, 0xa5, 0x56, 0xda, 0x79, 0xba, 0x0b, 0x03, 0xf7, 0x78, 0x8a, 0x15, 0x6d, 0x6c, 0xaa, + 0xde, 0x96, 0x23, 0xf2, 0x66, 0x91, 0x23, 0x52, 0x7f, 0x4c, 0xf3, 0xb8, 0x5c, 0xb1, 0xc4, 0x26, + 0x7f, 0xfb, 0x3b, 0x15, 0x50, 0xd9, 0x70, 0x2c, 0xfe, 0xa8, 0xa0, 0xe8, 0xad, 0xcc, 0x9d, 0x47, + 0xe5, 0x01, 0xee, 0x3c, 0x92, 0x83, 0xa1, 0x72, 0xaf, 0x80, 0x00, 0xee, 0x25, 0x31, 0x5d, 0x97, + 0xea, 0x0d, 0x57, 0x35, 0x08, 0x9b, 0x78, 0x54, 0x12, 0xdf, 0xdb, 0x22, 0xfc, 0xa1, 0xa1, 0xac, + 0x24, 0x0b, 0x12, 0x80, 0x35, 0x0e, 0x95, 0xa4, 0xe9, 0xad, 0xaf, 0x8b, 0x2d, 0xbf, 0x92, 0x84, + 0xf6, 0x0e, 0x66, 0x10, 0x5e, 0x87, 0x37, 0xdc, 0x14, 0x56, 0xb0, 0x51, 0x87, 0x37, 0xdc, 0xc4, + 0x0c, 0x42, 0xed, 0xb6, 0x20, 0x8c, 0xdb, 0xec, 0x7e, 0xe6, 0xa6, 0xe2, 0x22, 0xac, 0x5f, 0x65, + 0xb7, 0x5d, 0xef, 0x46, 0xc1, 0xbd, 0x9e, 0xa3, 0x23, 0x30, 0x8a, 0x49, 0xd3, 0x73, 0x53, 0x93, + 0x1a, 0x64, 0x47, 0xe0, 0x72, 0x17, 0x06, 
0xee, 0xf1, 0x14, 0x9a, 0x86, 0x53, 0x32, 0x9b, 0x51, + 0xd6, 0xaa, 0x18, 0xc9, 0xe6, 0xc6, 0xe3, 0x2c, 0x18, 0xe7, 0xf1, 0xa9, 0x56, 0x6b, 0x8b, 0x32, + 0x35, 0xcc, 0x58, 0x36, 0xb4, 0x9a, 0x2c, 0x5f, 0x83, 0x15, 0x86, 0xfd, 0xa9, 0x32, 0x5d, 0x85, + 0xfb, 0x94, 0x67, 0xba, 0x6f, 0xd1, 0x82, 0xd9, 0x11, 0x59, 0x19, 0x60, 0x44, 0xbe, 0x00, 0xa3, + 0xb7, 0x93, 0x30, 0x50, 0x91, 0x78, 0xd5, 0xbe, 0x91, 0x78, 0x06, 0x56, 0xef, 0x48, 0xbc, 0xa1, + 0xa2, 0x22, 0xf1, 0x86, 0x0f, 0x19, 0x89, 0xf7, 0xad, 0x2a, 0xa8, 0x0b, 0x01, 0xae, 0x93, 0xf4, + 0x4e, 0x18, 0x6f, 0x7a, 0x41, 0x8b, 0x65, 0x81, 0x7e, 0xdd, 0x82, 0x51, 0x3e, 0x5f, 0x16, 0xcc, + 0x4c, 0xaa, 0xf5, 0x82, 0x2a, 0xcd, 0x67, 0x98, 0x4d, 0xae, 0x1a, 0x8c, 0x72, 0xf7, 0xd8, 0x99, + 0x20, 0x9c, 0x91, 0x08, 0x7d, 0x1c, 0x40, 0xfa, 0x47, 0xd7, 0xa5, 0xca, 0x9c, 0x2f, 0x46, 0x3e, + 0x4c, 0xd6, 0xb5, 0x0d, 0xbc, 0xaa, 0x98, 0x60, 0x83, 0x21, 0xfa, 0x4c, 0xfe, 0xfe, 0xfa, 0x8f, + 0x1e, 0x4b, 0xdf, 0x0c, 0x92, 0x63, 0x86, 0x61, 0xd8, 0x0b, 0x5a, 0x74, 0x9c, 0x88, 0x88, 0xa5, + 0x77, 0xf5, 0xca, 0xa0, 0x5e, 0x08, 0x9d, 0x66, 0xc3, 0xf1, 0x9d, 0xc0, 0x25, 0xf1, 0x3c, 0x47, + 0x37, 0x6f, 0x6f, 0x65, 0x0d, 0x58, 0x12, 0xea, 0xba, 0x4a, 0xa1, 0x3a, 0xc8, 0x55, 0x0a, 0xe7, + 0x3f, 0x00, 0x63, 0x5d, 0x1f, 0xf3, 0x40, 0x29, 0x65, 0x87, 0xcf, 0x46, 0xb3, 0xff, 0xe9, 0x90, + 0x5e, 0xb4, 0xae, 0x87, 0x4d, 0x5e, 0xd0, 0x3f, 0xd6, 0x5f, 0x54, 0xd8, 0xb8, 0x05, 0x0e, 0x11, + 0xe3, 0x06, 0x58, 0xd5, 0x88, 0x4d, 0x96, 0x74, 0x8c, 0x46, 0x4e, 0x4c, 0x82, 0xe3, 0x1e, 0xa3, + 0xcb, 0x8a, 0x09, 0x36, 0x18, 0xa2, 0x8d, 0x4c, 0x4e, 0xc9, 0xe5, 0xa3, 0xe7, 0x94, 0xb0, 0xda, + 0x32, 0xbd, 0x6a, 0x70, 0x7f, 0xc9, 0x82, 0x93, 0x41, 0x66, 0xe4, 0x16, 0x13, 0x46, 0xda, 0x7b, + 0x56, 0xf0, 0xfb, 0x64, 0xb2, 0x6d, 0x38, 0xc7, 0xbf, 0xd7, 0x92, 0x56, 0x3d, 0xe0, 0x92, 0xa6, + 0x6f, 0x06, 0x19, 0xea, 0x77, 0x33, 0x08, 0x0a, 0xd4, 0xd5, 0x48, 0xc3, 0x85, 0x5f, 0x8d, 0x04, + 0x3d, 0xae, 0x45, 0xba, 0x05, 0x75, 0x37, 0x26, 0x4e, 0x7a, 0xc8, 0x5b, 0x72, 0xd8, 0x01, 0xfd, + 0x8c, 0x24, 0x80, 0x35, 0x2d, 0xfb, 0x7f, 0x57, 0xe0, 0xb4, 0xec, 0x11, 0x19, 0x82, 0x4e, 0xd7, + 0x47, 0xce, 0x57, 0x1b, 0xb7, 0x6a, 0x7d, 0xbc, 0x22, 0x01, 0x58, 0xe3, 0x50, 0x7b, 0xac, 0x93, + 0x90, 0xa5, 0x88, 0x04, 0x0b, 0xde, 0x5a, 0x22, 0xce, 0x39, 0xd5, 0x44, 0xb9, 0xa1, 0x41, 0xd8, + 0xc4, 0xa3, 0xc6, 0x38, 0xb7, 0x8b, 0x93, 0x7c, 0xfa, 0x8a, 0xb0, 0xb7, 0xb1, 0x84, 0xa3, 0x5f, + 0xe8, 0x59, 0x2f, 0xb2, 0x98, 0xc4, 0xad, 0xae, 0xc8, 0xfb, 0x03, 0x5e, 0xac, 0xf6, 0x37, 0x2c, + 0x38, 0xc7, 0x5b, 0x65, 0x4f, 0xde, 0x88, 0x9a, 0x4e, 0x4a, 0x92, 0x62, 0xea, 0x37, 0xf7, 0x90, + 0x4f, 0x3b, 0x79, 0x7b, 0xb1, 0xc5, 0xbd, 0xa5, 0x41, 0x5f, 0xb4, 0xe0, 0xd4, 0x66, 0x26, 0xd3, + 0x5f, 0x2e, 0x1d, 0x47, 0xac, 0x49, 0x93, 0x2d, 0x1f, 0xa0, 0xa7, 0x5a, 0xb6, 0x3d, 0xc1, 0x79, + 0xee, 0xf6, 0x9f, 0x58, 0x60, 0xaa, 0xd1, 0xc1, 0x2c, 0x40, 0xe3, 0x2a, 0xdb, 0xd2, 0x3e, 0x57, + 0xd9, 0x4a, 0x63, 0xb1, 0x3c, 0xd8, 0xe6, 0xa4, 0x72, 0x80, 0xcd, 0x49, 0xb5, 0xaf, 0x75, 0xf9, + 0x04, 0x94, 0x3b, 0x5e, 0x53, 0xec, 0x2f, 0xf4, 0xe9, 0xeb, 0xfc, 0x2c, 0xa6, 0xed, 0xf6, 0x3f, + 0xaa, 0x6a, 0xbf, 0x85, 0xc8, 0x8b, 0xfa, 0xbe, 0x78, 0xed, 0x75, 0x55, 0x62, 0x88, 0xbf, 0xf9, + 0xf5, 0xae, 0x12, 0x43, 0x3f, 0x72, 0xf0, 0xb4, 0x37, 0xde, 0x41, 0xfd, 0x2a, 0x0c, 0x0d, 0xef, + 0x93, 0xf3, 0x76, 0x1b, 0x6a, 0x74, 0x0b, 0xc6, 0x1c, 0x90, 0xb5, 0x8c, 0x50, 0xb5, 0x2b, 0xa2, + 0xfd, 0xee, 0xee, 0xc4, 0x0f, 0x1f, 0x5c, 0x2c, 0xf9, 0x34, 0x56, 0xf4, 0x51, 0x02, 0x75, 0xfa, + 0x9b, 0xa5, 0xe7, 0x89, 0xcd, 0xdd, 0x0d, 0xa5, 0x33, 0x25, 0xa0, 
0x90, 0xdc, 0x3f, 0xcd, 0x07, + 0x05, 0x50, 0x67, 0x77, 0x50, 0x32, 0xa6, 0x7c, 0x0f, 0xb8, 0xac, 0x92, 0xe4, 0x24, 0xe0, 0xee, + 0xee, 0xc4, 0xcb, 0x07, 0x67, 0xaa, 0x1e, 0xc7, 0x9a, 0x85, 0xfd, 0xe5, 0x8a, 0x1e, 0xbb, 0xa2, + 0xb2, 0xd4, 0xf7, 0xc5, 0xd8, 0x7d, 0x29, 0x37, 0x76, 0x2f, 0x74, 0x8d, 0xdd, 0x93, 0xfa, 0xae, + 0xc4, 0xcc, 0x68, 0xbc, 0xdf, 0x86, 0xc0, 0xfe, 0xfe, 0x06, 0x66, 0x01, 0xbd, 0xd1, 0xf1, 0x62, + 0x92, 0x2c, 0xc7, 0x9d, 0xc0, 0x0b, 0x5a, 0xe2, 0x0e, 0x7c, 0xc3, 0x02, 0xca, 0x80, 0x71, 0x1e, + 0x9f, 0xdd, 0x9f, 0xbf, 0x13, 0xb8, 0xb7, 0x9c, 0x2d, 0x3e, 0xaa, 0x8c, 0x62, 0x3b, 0x2b, 0xa2, + 0x1d, 0x2b, 0x0c, 0xfb, 0x2d, 0x76, 0x96, 0x6d, 0xe4, 0x05, 0xd3, 0x31, 0xe1, 0xb3, 0x4b, 0x3f, + 0x79, 0xa5, 0x1e, 0x35, 0x26, 0xf8, 0x4d, 0x9f, 0x1c, 0x86, 0xee, 0xc0, 0xf0, 0x1a, 0xbf, 0xf5, + 0xaa, 0x98, 0xaa, 0xc4, 0xe2, 0x0a, 0x2d, 0x76, 0xb7, 0x81, 0xbc, 0x4f, 0xeb, 0xae, 0xfe, 0x89, + 0x25, 0x37, 0xfb, 0x9b, 0x15, 0x38, 0x95, 0xbb, 0x16, 0x32, 0x53, 0x23, 0xb1, 0xb4, 0x6f, 0x8d, + 0xc4, 0x8f, 0x00, 0x34, 0x49, 0xe4, 0x87, 0x3b, 0xcc, 0x1c, 0xab, 0x1c, 0xd8, 0x1c, 0x53, 0x16, + 0xfc, 0xac, 0xa2, 0x82, 0x0d, 0x8a, 0xa2, 0x3c, 0x11, 0x2f, 0xb9, 0x98, 0x2b, 0x4f, 0x64, 0xd4, + 0x2e, 0x1f, 0xba, 0xbf, 0xb5, 0xcb, 0x3d, 0x38, 0xc5, 0x45, 0x54, 0xd9, 0xb7, 0x87, 0x48, 0xb2, + 0x65, 0xf9, 0x0b, 0xb3, 0x59, 0x32, 0x38, 0x4f, 0xf7, 0x41, 0xde, 0xfa, 0x8a, 0xde, 0x03, 0x75, + 0xf9, 0x9d, 0x93, 0xf1, 0xba, 0xae, 0x60, 0x20, 0x87, 0x01, 0xbb, 0x8d, 0x55, 0xfc, 0xb4, 0xbf, + 0x50, 0xa2, 0xd6, 0x33, 0xff, 0xa7, 0x2a, 0xd1, 0x3c, 0x0d, 0x43, 0x4e, 0x27, 0xdd, 0x08, 0xbb, + 0x6e, 0xce, 0x9a, 0x66, 0xad, 0x58, 0x40, 0xd1, 0x02, 0x54, 0x9a, 0xba, 0xba, 0xc8, 0x41, 0x7a, + 0x51, 0x3b, 0x22, 0x9d, 0x94, 0x60, 0x46, 0x05, 0x3d, 0x0e, 0x95, 0xd4, 0x69, 0xc9, 0x44, 0x27, + 0x96, 0xdc, 0xba, 0xea, 0xb4, 0x12, 0xcc, 0x5a, 0xcd, 0x45, 0xb3, 0xb2, 0xcf, 0xa2, 0xf9, 0x32, + 0x9c, 0x48, 0xbc, 0x56, 0xe0, 0xa4, 0x9d, 0x98, 0x18, 0x87, 0x6b, 0x3a, 0x5e, 0xc2, 0x04, 0xe2, + 0x2c, 0xae, 0xfd, 0x9b, 0xa3, 0x70, 0x76, 0x65, 0x66, 0x51, 0x56, 0xca, 0x3d, 0xb6, 0x5c, 0xa5, + 0x5e, 0x3c, 0xee, 0x5f, 0xae, 0x52, 0x1f, 0xee, 0xbe, 0x91, 0xab, 0xe4, 0x1b, 0xb9, 0x4a, 0xd9, + 0xc4, 0x91, 0x72, 0x11, 0x89, 0x23, 0xbd, 0x24, 0x18, 0x24, 0x71, 0xe4, 0xd8, 0x92, 0x97, 0xee, + 0x29, 0xd0, 0x81, 0x92, 0x97, 0x54, 0x66, 0x57, 0x21, 0x21, 0xfd, 0x7d, 0x3e, 0x55, 0xcf, 0xcc, + 0x2e, 0x95, 0x55, 0xc3, 0xd3, 0x55, 0x84, 0x82, 0x7d, 0xad, 0x78, 0x01, 0x06, 0xc8, 0xaa, 0x11, + 0x19, 0x33, 0x66, 0x26, 0xd7, 0x70, 0x11, 0x99, 0x5c, 0xbd, 0xc4, 0xd9, 0x37, 0x93, 0xeb, 0x65, + 0x38, 0xe1, 0xfa, 0x61, 0x40, 0x96, 0xe3, 0x30, 0x0d, 0xdd, 0xd0, 0x17, 0xc6, 0xb4, 0x52, 0x09, + 0x33, 0x26, 0x10, 0x67, 0x71, 0xfb, 0xa5, 0x81, 0xd5, 0x8f, 0x9a, 0x06, 0x06, 0x0f, 0x28, 0x0d, + 0xec, 0x67, 0x75, 0xc2, 0xf2, 0x08, 0xfb, 0x22, 0x1f, 0x29, 0xfe, 0x8b, 0x0c, 0x92, 0xb5, 0x8c, + 0xbe, 0xca, 0xaf, 0xae, 0xa2, 0xe6, 0xe8, 0x4c, 0xd8, 0xa6, 0xe6, 0xd6, 0x28, 0xeb, 0x92, 0xd7, + 0x8f, 0x61, 0xc0, 0xde, 0x5a, 0xd1, 0x6c, 0xd4, 0x75, 0x56, 0xba, 0x09, 0x67, 0x05, 0x39, 0x4a, + 0x42, 0xf5, 0xd7, 0x4a, 0xf0, 0x03, 0xfb, 0x8a, 0x80, 0xee, 0x00, 0xa4, 0x4e, 0x4b, 0x0c, 0x54, + 0x71, 0x4c, 0x71, 0xc4, 0xa0, 0xc6, 0x55, 0x49, 0x8f, 0x57, 0x02, 0x51, 0x7f, 0xd9, 0x01, 0x80, + 0xfc, 0xcd, 0x62, 0x19, 0x43, 0xbf, 0xab, 0xea, 0x21, 0x0e, 0x7d, 0x82, 0x19, 0x84, 0x2e, 0xff, + 0x31, 0x69, 0xe9, 0xbb, 0x56, 0xd5, 0xe7, 0xc3, 0xac, 0x15, 0x0b, 0x28, 0x7a, 0x11, 0x46, 0x1c, + 0xdf, 0xe7, 0x59, 0x29, 0x24, 0x11, 0x77, 0x57, 0xe8, 0xca, 0x6d, 0x1a, 0x84, 0x4d, 0x3c, 
0xfb, + 0x8f, 0x4b, 0x30, 0xb1, 0x8f, 0x4e, 0xe9, 0xca, 0xb3, 0xab, 0x0e, 0x9c, 0x67, 0x27, 0x32, 0x03, + 0x86, 0xfa, 0x64, 0x06, 0xbc, 0x08, 0x23, 0x29, 0x71, 0xda, 0x22, 0x0c, 0x4a, 0xec, 0xbf, 0xf5, + 0xb9, 0xab, 0x06, 0x61, 0x13, 0x8f, 0x6a, 0xb1, 0x93, 0x8e, 0xeb, 0x92, 0x24, 0x91, 0xa1, 0xff, + 0xc2, 0x87, 0x59, 0x58, 0x5e, 0x01, 0x73, 0x0d, 0x4f, 0x67, 0x58, 0xe0, 0x1c, 0xcb, 0x7c, 0x87, + 0xd7, 0x07, 0xec, 0xf0, 0x6f, 0x94, 0xe0, 0x89, 0x7b, 0xae, 0x6e, 0x03, 0x67, 0x65, 0x74, 0x12, + 0x12, 0xe7, 0x07, 0xce, 0x8d, 0x84, 0xc4, 0x98, 0x41, 0x78, 0x2f, 0x45, 0x91, 0x71, 0x97, 0x6d, + 0xd1, 0x29, 0x43, 0xbc, 0x97, 0x32, 0x2c, 0x70, 0x8e, 0xe5, 0x61, 0x87, 0xe5, 0xdf, 0x29, 0xc1, + 0x53, 0x03, 0xd8, 0x00, 0x05, 0xa6, 0x56, 0x65, 0x13, 0xdc, 0xca, 0x0f, 0x28, 0x0f, 0xf1, 0x90, + 0xdd, 0xf5, 0x56, 0x09, 0xce, 0xf7, 0x5f, 0x8a, 0xd1, 0x8f, 0xd2, 0x3d, 0xbc, 0x8c, 0x7d, 0x32, + 0x73, 0xe3, 0xce, 0xf0, 0xfd, 0x7b, 0x06, 0x84, 0xf3, 0xb8, 0x68, 0x12, 0x20, 0x72, 0xd2, 0x8d, + 0xe4, 0xd2, 0xb6, 0x97, 0xa4, 0xa2, 0xf6, 0xcb, 0x49, 0x7e, 0x62, 0x24, 0x5b, 0xb1, 0x81, 0x41, + 0xd9, 0xb1, 0x7f, 0xb3, 0xe1, 0xf5, 0x30, 0xe5, 0x0f, 0xf1, 0x6d, 0xc4, 0x19, 0x59, 0x1f, 0xdf, + 0x00, 0xe1, 0x3c, 0x2e, 0x65, 0xc7, 0xce, 0x24, 0xb9, 0xa0, 0x7c, 0x7f, 0xc1, 0xd8, 0x2d, 0xa8, + 0x56, 0x6c, 0x60, 0xe4, 0xb3, 0xfe, 0xaa, 0xfb, 0x67, 0xfd, 0xd9, 0xff, 0xb0, 0x04, 0x8f, 0xf5, + 0x35, 0xe5, 0x06, 0x9b, 0x80, 0x0f, 0x5f, 0xa6, 0xde, 0xe1, 0xc6, 0xce, 0x01, 0x33, 0xca, 0xfe, + 0xb0, 0xcf, 0x48, 0x13, 0x19, 0x65, 0x87, 0x4f, 0xc9, 0x7e, 0xf8, 0xfa, 0xb3, 0x2b, 0x89, 0xac, + 0x72, 0x80, 0x24, 0xb2, 0xdc, 0xc7, 0xa8, 0x0e, 0x38, 0x91, 0xff, 0x6f, 0xff, 0xee, 0xa5, 0x5b, + 0xbf, 0x81, 0xbc, 0xa3, 0xb3, 0x70, 0xda, 0x0b, 0xd8, 0x5d, 0x29, 0x2b, 0x9d, 0x35, 0x51, 0x0e, + 0xa4, 0x94, 0xbd, 0xa9, 0x78, 0x3e, 0x07, 0xc7, 0x5d, 0x4f, 0x3c, 0x84, 0x49, 0x7d, 0x87, 0xeb, + 0xd2, 0x03, 0xa6, 0x95, 0x7e, 0x04, 0xea, 0x4a, 0x12, 0x1e, 0xd6, 0xac, 0x3e, 0x7f, 0x57, 0x58, + 0xb3, 0xfa, 0xf6, 0x06, 0x16, 0xed, 0x37, 0x6a, 0x9c, 0xe6, 0xc6, 0xf1, 0x35, 0xb2, 0xc3, 0x2c, + 0x55, 0xfb, 0xbd, 0x30, 0xaa, 0x3c, 0x1e, 0x83, 0x5e, 0x9f, 0x61, 0x7f, 0x79, 0x08, 0x4e, 0x64, + 0x8a, 0xe3, 0x65, 0x1c, 0x8c, 0xd6, 0xbe, 0x0e, 0x46, 0x16, 0xa6, 0xde, 0x09, 0xe4, 0xdd, 0x3a, + 0x46, 0x98, 0x7a, 0x27, 0x20, 0x98, 0xc3, 0xa8, 0xa1, 0xd9, 0x8c, 0x77, 0x70, 0x27, 0x10, 0xe1, + 0xa4, 0xca, 0xd0, 0x9c, 0x65, 0xad, 0x58, 0x40, 0xd1, 0x27, 0x2d, 0x18, 0x4d, 0x98, 0xf7, 0x9a, + 0xbb, 0x67, 0xc5, 0xe7, 0xbf, 0x7a, 0xf4, 0xda, 0x7f, 0xaa, 0x10, 0x24, 0x8b, 0x10, 0x31, 0x5b, + 0x70, 0x86, 0x23, 0xfa, 0x69, 0x0b, 0xea, 0xea, 0x0a, 0x00, 0x71, 0x01, 0xd6, 0x4a, 0xb1, 0xb5, + 0x07, 0xb9, 0x5f, 0x4f, 0x1d, 0x04, 0xe8, 0x3b, 0xbd, 0x35, 0x63, 0x94, 0x28, 0xdf, 0xe9, 0xf0, + 0xf1, 0xf8, 0x4e, 0xa1, 0x87, 0xdf, 0xf4, 0x3d, 0x50, 0x6f, 0x3b, 0x81, 0xb7, 0x4e, 0x92, 0x94, + 0xbb, 0x33, 0x65, 0x49, 0x54, 0xd9, 0x88, 0x35, 0x9c, 0x2e, 0x8d, 0x09, 0x7b, 0xb1, 0xd4, 0xf0, + 0x3f, 0xb2, 0xa5, 0x71, 0x45, 0x37, 0x63, 0x13, 0xc7, 0x74, 0x96, 0xc2, 0x03, 0x75, 0x96, 0x8e, + 0xec, 0xe3, 0x2c, 0xfd, 0x7b, 0x16, 0x9c, 0xeb, 0xf9, 0xd5, 0x1e, 0xde, 0xc0, 0x3f, 0xfb, 0x2b, + 0x55, 0x38, 0xd3, 0xa3, 0xca, 0x25, 0xda, 0x31, 0xc7, 0xb3, 0x55, 0xc4, 0x19, 0x7a, 0xf6, 0x48, + 0x58, 0x76, 0x63, 0x8f, 0x41, 0x7c, 0xb0, 0xa3, 0x0a, 0x7d, 0x5c, 0x50, 0xbe, 0xbf, 0xc7, 0x05, + 0xc6, 0xb0, 0xac, 0x3c, 0xd0, 0x61, 0x59, 0xbd, 0xf7, 0xb0, 0x44, 0xbf, 0x66, 0xc1, 0x78, 0xbb, + 0x4f, 0x69, 0x75, 0xe1, 0x02, 0xbc, 0x79, 0x3c, 0x85, 0xdb, 0x1b, 0x8f, 0xef, 0xed, 0x4e, 0xf4, + 0xad, 0x68, 0x8f, 
0xfb, 0x4a, 0x65, 0x7f, 0xa7, 0x0c, 0xac, 0xc4, 0x2a, 0xab, 0x64, 0xb6, 0x83, + 0x3e, 0x61, 0x16, 0xcb, 0xb5, 0x8a, 0x2a, 0xec, 0xca, 0x89, 0xab, 0x62, 0xbb, 0xbc, 0x07, 0x7b, + 0xd5, 0xde, 0xcd, 0x2b, 0xad, 0xd2, 0x00, 0x4a, 0xcb, 0x97, 0x55, 0x89, 0xcb, 0xc5, 0x57, 0x25, + 0xae, 0xe7, 0x2b, 0x12, 0xdf, 0xfb, 0x13, 0x57, 0x1e, 0xca, 0x4f, 0xfc, 0xd7, 0x2c, 0xae, 0x78, + 0x72, 0x5f, 0x41, 0x5b, 0x06, 0xd6, 0x3d, 0x2c, 0x83, 0x67, 0xd9, 0xed, 0xec, 0xeb, 0x57, 0x88, + 0xe3, 0x0b, 0x0b, 0xc2, 0xbc, 0x68, 0x9d, 0xb5, 0x63, 0x85, 0xc1, 0x2e, 0x2b, 0xf4, 0xfd, 0xf0, + 0xce, 0xa5, 0x76, 0x94, 0xee, 0x08, 0x5b, 0x42, 0x5f, 0x56, 0xa8, 0x20, 0xd8, 0xc0, 0xb2, 0xff, + 0x7a, 0x89, 0x8f, 0x40, 0x11, 0x04, 0xf0, 0x52, 0xee, 0x7a, 0xa9, 0xc1, 0xcf, 0xcf, 0x3f, 0x06, + 0xe0, 0xaa, 0x8b, 0x99, 0xc5, 0xe9, 0xcc, 0x95, 0x23, 0xdf, 0x1a, 0x2b, 0xe8, 0xe9, 0xd7, 0xd0, + 0x6d, 0xd8, 0xe0, 0x97, 0xd1, 0xa5, 0xe5, 0x7d, 0x75, 0x69, 0x46, 0xad, 0x54, 0xf6, 0x59, 0xed, + 0xfe, 0xd8, 0x82, 0x8c, 0x45, 0x84, 0x22, 0xa8, 0x52, 0x71, 0x77, 0x8a, 0xb9, 0x73, 0xda, 0x24, + 0x4d, 0x55, 0xa3, 0x18, 0xf6, 0xec, 0x27, 0xe6, 0x8c, 0x90, 0x2f, 0x62, 0x05, 0x4a, 0x45, 0xdc, + 0x8b, 0x6e, 0x32, 0xbc, 0x12, 0x86, 0x9b, 0xfc, 0x88, 0x51, 0xc7, 0x1d, 0xd8, 0x2f, 0xc1, 0x58, + 0x97, 0x50, 0xec, 0x26, 0x99, 0x50, 0x5e, 0xb4, 0x6d, 0x0c, 0x57, 0x96, 0xc0, 0x88, 0x39, 0xcc, + 0x7e, 0xcb, 0x82, 0xd3, 0x79, 0xf2, 0xe8, 0xab, 0x16, 0x8c, 0x25, 0x79, 0x7a, 0xc7, 0xd5, 0x77, + 0x2a, 0xde, 0xaf, 0x0b, 0x84, 0xbb, 0x85, 0xb0, 0xff, 0x8f, 0x18, 0xfc, 0xb7, 0xbc, 0xa0, 0x19, + 0xde, 0x51, 0x86, 0x89, 0xd5, 0xd7, 0x30, 0xa1, 0xf3, 0xd1, 0xdd, 0x20, 0xcd, 0x8e, 0xdf, 0x95, + 0x39, 0xb9, 0x22, 0xda, 0xb1, 0xc2, 0x60, 0x89, 0x62, 0x1d, 0x51, 0xb6, 0x3c, 0x37, 0x28, 0x67, + 0x45, 0x3b, 0x56, 0x18, 0xe8, 0x05, 0x18, 0x35, 0x2f, 0x93, 0x17, 0xe3, 0x92, 0x19, 0xe4, 0xe6, + 0xbd, 0xf3, 0x38, 0x83, 0x85, 0x26, 0x01, 0x94, 0x91, 0x23, 0x97, 0x48, 0xe6, 0xb2, 0x51, 0x9a, + 0x28, 0xc1, 0x06, 0x06, 0x4b, 0xcb, 0xe4, 0x37, 0xb6, 0xcb, 0xa8, 0x58, 0x9e, 0x96, 0x29, 0xda, + 0xb0, 0x82, 0x52, 0x6d, 0xd2, 0x76, 0x82, 0x8e, 0xe3, 0xd3, 0x1e, 0x12, 0xb9, 0xe4, 0x6a, 0x1a, + 0x2e, 0x2a, 0x08, 0x36, 0xb0, 0xe8, 0x1b, 0xa7, 0x5e, 0x9b, 0xbc, 0x1a, 0x06, 0x32, 0x4e, 0x4b, + 0x1f, 0xc0, 0x88, 0x76, 0xac, 0x30, 0xec, 0xff, 0x6c, 0xc1, 0x29, 0x9d, 0xe4, 0xcd, 0xef, 0x8c, + 0x35, 0xf7, 0x8c, 0xd6, 0xbe, 0xf9, 0xeb, 0xd9, 0xec, 0xd7, 0xd2, 0x40, 0xd9, 0xaf, 0x66, 0x62, + 0x6a, 0xf9, 0x9e, 0x89, 0xa9, 0x3f, 0xa8, 0xef, 0x23, 0xe4, 0x19, 0xac, 0x23, 0xbd, 0xee, 0x22, + 0x44, 0x36, 0x0c, 0xb9, 0x8e, 0xaa, 0x70, 0x32, 0xca, 0xf7, 0x0e, 0x33, 0xd3, 0x0c, 0x49, 0x40, + 0xec, 0x25, 0xa8, 0xab, 0x73, 0x08, 0xb9, 0x51, 0xb5, 0x7a, 0x6f, 0x54, 0x07, 0x4a, 0x90, 0x6b, + 0xac, 0x7d, 0xf3, 0xbb, 0x4f, 0xbe, 0xe3, 0x77, 0xbf, 0xfb, 0xe4, 0x3b, 0xfe, 0xe0, 0xbb, 0x4f, + 0xbe, 0xe3, 0x93, 0x7b, 0x4f, 0x5a, 0xdf, 0xdc, 0x7b, 0xd2, 0xfa, 0xdd, 0xbd, 0x27, 0xad, 0x3f, + 0xd8, 0x7b, 0xd2, 0xfa, 0xce, 0xde, 0x93, 0xd6, 0x97, 0xfe, 0xc3, 0x93, 0xef, 0x78, 0xb5, 0x67, + 0xa0, 0x1e, 0xfd, 0xf1, 0x9c, 0xdb, 0x9c, 0xda, 0xba, 0xc8, 0x62, 0xc5, 0xe8, 0xf4, 0x9a, 0x32, + 0xc6, 0xd4, 0x94, 0x9c, 0x5e, 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xc3, 0xee, 0x24, 0x07, 0x84, + 0xd9, 0x00, 0x00, } func (m *AWSAuthConfig) Marshal() (dAtA []byte, err error) { @@ -5125,6 +5567,38 @@ func (m *ApplicationMatchExpression) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } +func (m *ApplicationPreservedFields) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplicationPreservedFields) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplicationPreservedFields) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Annotations[iNdEx]) + copy(dAtA[i:], m.Annotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Annotations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *ApplicationSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5308,6 +5782,18 @@ func (m *ApplicationSetGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.Plugin != nil { + { + size, err := m.Plugin.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.Selector != nil { { size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) @@ -5486,6 +5972,18 @@ func (m *ApplicationSetNestedGenerator) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Plugin != nil { + { + size, err := m.Plugin.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.Selector != nil { { size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) @@ -5703,6 +6201,35 @@ func (m *ApplicationSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.ApplyNestedSelectors { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.GoTemplateOptions) > 0 { + for iNdEx := len(m.GoTemplateOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.GoTemplateOptions[iNdEx]) + copy(dAtA[i:], m.GoTemplateOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GoTemplateOptions[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.PreservedFields != nil { + { + size, err := m.PreservedFields.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if m.Strategy != nil { { size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) @@ -5873,6 +6400,13 @@ func (m *ApplicationSetSyncPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if m.ApplicationsSync != nil { + i -= len(*m.ApplicationsSync) + copy(dAtA[i:], *m.ApplicationsSync) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ApplicationsSync))) + i-- + dAtA[i] = 0x12 + } i-- if m.PreserveResourcesOnDeletion { dAtA[i] = 1 @@ -6037,6 +6571,30 @@ func (m *ApplicationSetTerminalGenerator) MarshalToSizedBuffer(dAtA []byte) (int _ = i var l int _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Plugin != nil { + { + size, err := m.Plugin.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } if m.PullRequest != nil { { size, err := m.PullRequest.MarshalToSizedBuffer(dAtA[:i]) @@ -6279,6 +6837,18 @@ func (m *ApplicationSourceHelm) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ValuesObject != nil { + { + size, err := m.ValuesObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } i-- if m.SkipCrds { dAtA[i] = 1 @@ -6438,6 +7008,33 @@ func (m *ApplicationSourceKustomize) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if len(m.Replicas) > 0 { + for iNdEx := len(m.Replicas) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Replicas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + i-- + if m.CommonAnnotationsEnvsubst { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x4a i-- if m.ForceCommonAnnotations { dAtA[i] = 1 @@ -6612,38 +7209,29 @@ func (m *ApplicationSourcePluginParameter) MarshalToSizedBuffer(dAtA []byte) (in i-- dAtA[i] = 0x2a } - if len(m.Array) > 0 { - for iNdEx := len(m.Array) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Array[iNdEx]) - copy(dAtA[i:], m.Array[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Array[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.OptionalArray != nil { + { + size, err := m.OptionalArray.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.Map) > 0 { - keysForMap := make([]string, 0, len(m.Map)) - for k := range m.Map { - keysForMap = append(keysForMap, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - for iNdEx := len(keysForMap) - 1; iNdEx >= 0; iNdEx-- { - v := m.Map[string(keysForMap[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForMap[iNdEx]) - copy(dAtA[i:], keysForMap[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMap[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a + if m.OptionalMap != nil { + { + size, err := m.OptionalMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } i -= len(m.Name) copy(dAtA[i:], m.Name) @@ -6782,6 +7370,11 @@ func (m *ApplicationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.ControllerNamespace) + copy(dAtA[i:], m.ControllerNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ControllerNamespace))) + i-- + dAtA[i] = 0x6a if len(m.SourceTypes) > 0 { for iNdEx := len(m.SourceTypes) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.SourceTypes[iNdEx]) @@ -7134,6 +7727,83 @@ func (m *BasicAuthBitbucketServer) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *BearerTokenBitbucketCloud) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BearerTokenBitbucketCloud) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BearerTokenBitbucketCloud) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + if m.TokenRef != nil { + { + size, err := m.TokenRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChartDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChartDetails) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChartDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Maintainers) > 0 { + for iNdEx := len(m.Maintainers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Maintainers[iNdEx]) + copy(dAtA[i:], m.Maintainers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Maintainers[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Home) + copy(dAtA[i:], m.Home) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Home))) + i-- + dAtA[i] = 0x12 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Cluster) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7637,6 +8307,20 @@ func (m *ComparedTo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.IgnoreDifferences) > 0 { + for iNdEx := len(m.IgnoreDifferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IgnoreDifferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { @@ -8085,6 +8769,30 @@ func (m *GitGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Values) > 0 { + keysForValues := make([]string, 0, len(m.Values)) + for k := range m.Values { + keysForValues = append(keysForValues, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForValues) + for iNdEx := len(keysForValues) - 1; iNdEx >= 0; iNdEx-- { + v := m.Values[string(keysForValues[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForValues[iNdEx]) + copy(dAtA[i:], keysForValues[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForValues[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 + } + } i -= len(m.PathParamPrefix) copy(dAtA[i:], m.PathParamPrefix) i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathParamPrefix))) @@ -8718,6 +9426,44 @@ func (m *KustomizeOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *KustomizeReplica) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KustomizeReplica) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KustomizeReplica) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Count.MarshalToSizedBuffer(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ListGenerator) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8738,6 +9484,11 @@ func (m *ListGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.ElementsYaml) + copy(dAtA[i:], m.ElementsYaml) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ElementsYaml))) + i-- + dAtA[i] = 0x1a { size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -9207,6 +9958,85 @@ func (m *OperationState) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *OptionalArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OptionalArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Array) > 0 { + for iNdEx := len(m.Array) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Array[iNdEx]) + copy(dAtA[i:], m.Array[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Array[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OptionalMap) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OptionalMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OptionalMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Map) > 0 { + keysForMap := make([]string, 0, len(m.Map)) + for k := range m.Map { + keysForMap = append(keysForMap, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMap) + for iNdEx := len(keysForMap) - 1; iNdEx >= 0; iNdEx-- { + v := m.Map[string(keysForMap[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMap[iNdEx]) + copy(dAtA[i:], keysForMap[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMap[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *OrphanedResourceKey) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9342,6 +10172,168 @@ func (m *OverrideIgnoreDiff) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PluginConfigMapRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginConfigMapRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginConfigMapRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PluginGenerator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginGenerator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + keysForValues := make([]string, 0, len(m.Values)) + for k := range m.Values { + keysForValues = append(keysForValues, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForValues) + for iNdEx := len(keysForValues) - 1; iNdEx >= 0; iNdEx-- { + v := m.Values[string(keysForValues[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForValues[iNdEx]) + copy(dAtA[i:], keysForValues[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForValues[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.RequeueAfterSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RequeueAfterSeconds)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.Input.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PluginInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginInput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PluginInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *ProjectRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9427,6 +10419,30 @@ func (m *PullRequestGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
_ = i var l int _ = l + if m.AzureDevOps != nil { + { + size, err := m.AzureDevOps.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Bitbucket != nil { + { + size, err := m.Bitbucket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } { size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -9507,6 +10523,132 @@ func (m *PullRequestGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PullRequestGeneratorAzureDevOps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PullRequestGeneratorAzureDevOps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PullRequestGeneratorAzureDevOps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Labels[iNdEx]) + copy(dAtA[i:], m.Labels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Labels[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.TokenRef != nil { + { + size, err := m.TokenRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.API) + copy(dAtA[i:], m.API) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.API))) + i-- + dAtA[i] = 0x22 + i -= len(m.Repo) + copy(dAtA[i:], m.Repo) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repo))) + i-- + dAtA[i] = 0x1a + i -= len(m.Project) + copy(dAtA[i:], m.Project) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Project))) + i-- + dAtA[i] = 0x12 + i -= len(m.Organization) + copy(dAtA[i:], m.Organization) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PullRequestGeneratorBitbucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PullRequestGeneratorBitbucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PullRequestGeneratorBitbucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BearerToken != nil { + { + size, err := m.BearerToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.BasicAuth != nil { + { + size, err := m.BasicAuth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.API) + copy(dAtA[i:], m.API) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.API))) + i-- + dAtA[i] = 0x1a + i -= len(m.Repo) + copy(dAtA[i:], m.Repo) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repo))) + i-- + dAtA[i] = 0x12 + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Owner))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PullRequestGeneratorBitbucketServer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9577,6 +10719,13 @@ func (m *PullRequestGeneratorFilter) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if m.TargetBranchMatch != nil { + i -= len(*m.TargetBranchMatch) + copy(dAtA[i:], *m.TargetBranchMatch) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TargetBranchMatch))) + i-- + dAtA[i] = 0x12 + } if m.BranchMatch != nil { i -= len(*m.BranchMatch) copy(dAtA[i:], *m.BranchMatch) @@ -9607,6 +10756,14 @@ func (m *PullRequestGeneratorGitLab) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + i-- + if m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 i -= len(m.PullRequestState) copy(dAtA[i:], m.PullRequestState) i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullRequestState))) @@ -10292,6 +11449,16 @@ func (m *ResourceAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x2a + i -= len(m.IconClass) + copy(dAtA[i:], m.IconClass) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IconClass))) + i-- + dAtA[i] = 0x22 i-- if m.Disabled { dAtA[i] = 1 @@ -10838,6 +12005,16 @@ func (m *ResourceOverride) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.IgnoreResourceUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 i-- if m.UseOpenLibs { dAtA[i] = 1 @@ -11294,6 +12471,42 @@ func (m *SCMProviderGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AWSCodeCommit != nil { + { + size, err := m.AWSCodeCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if len(m.Values) > 0 { + keysForValues := make([]string, 0, len(m.Values)) + for k := range m.Values { + keysForValues = append(keysForValues, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForValues) + for iNdEx := len(keysForValues) - 1; iNdEx >= 0; iNdEx-- { + v := m.Values[string(keysForValues[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForValues[iNdEx]) + copy(dAtA[i:], keysForValues[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForValues[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } { size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -11403,6 +12616,61 @@ func (m *SCMProviderGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SCMProviderGeneratorAWSCodeCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SCMProviderGeneratorAWSCodeCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SCMProviderGeneratorAWSCodeCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if 
m.AllBranches { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x1a + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + if len(m.TagFilters) > 0 { + for iNdEx := len(m.TagFilters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TagFilters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *SCMProviderGeneratorAzureDevOps) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11769,6 +13037,14 @@ func (m *SCMProviderGeneratorGitlab) MarshalToSizedBuffer(dAtA []byte) (int, err var l int _ = l i-- + if m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i-- if m.AllBranches { dAtA[i] = 1 } else { @@ -12056,6 +13332,18 @@ func (m *SyncOperationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ManagedNamespaceMetadata != nil { + { + size, err := m.ManagedNamespaceMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.Revisions) > 0 { for iNdEx := len(m.Revisions) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Revisions[iNdEx]) @@ -12524,6 +13812,39 @@ func (m *TLSClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TagFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { offset -= sovGenerated(v) base := offset @@ -12761,6 +14082,21 @@ func (m *ApplicationMatchExpression) Size() (n int) { return n } +func (m *ApplicationPreservedFields) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Annotations) > 0 { + for _, s := range m.Annotations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ApplicationSet) Size() (n int) { if m == nil { return 0 @@ -12860,6 +14196,10 @@ func (m *ApplicationSetGenerator) Size() (n int) { l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Plugin != nil { + l = m.Plugin.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12922,6 +14262,10 @@ func (m *ApplicationSetNestedGenerator) Size() (n int) { l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Plugin != nil { + l = m.Plugin.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12982,6 +14326,17 @@ func (m *ApplicationSetSpec) Size() (n int) { l = m.Strategy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.PreservedFields != 
nil { + l = m.PreservedFields.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.GoTemplateOptions) > 0 { + for _, s := range m.GoTemplateOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 return n } @@ -13028,6 +14383,10 @@ func (m *ApplicationSetSyncPolicy) Size() (n int) { var l int _ = l n += 2 + if m.ApplicationsSync != nil { + l = len(*m.ApplicationsSync) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -13109,6 +14468,14 @@ func (m *ApplicationSetTerminalGenerator) Size() (n int) { l = m.PullRequest.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Plugin != nil { + l = m.Plugin.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -13196,6 +14563,10 @@ func (m *ApplicationSourceHelm) Size() (n int) { n += 2 n += 2 n += 2 + if m.ValuesObject != nil { + l = m.ValuesObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -13262,6 +14633,15 @@ func (m *ApplicationSourceKustomize) Size() (n int) { } n += 2 n += 2 + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Replicas) > 0 { + for _, e := range m.Replicas { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -13296,19 +14676,13 @@ func (m *ApplicationSourcePluginParameter) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Map) > 0 { - for k, v := range m.Map { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.OptionalMap != nil { + l = m.OptionalMap.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Array) > 0 { - for _, s := range m.Array { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.OptionalArray != nil { + l = m.OptionalArray.Size() + n += 1 + l + sovGenerated(uint64(l)) } if m.String_ != nil { l = len(*m.String_) @@ -13411,6 +14785,8 @@ func (m *ApplicationStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.ControllerNamespace) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -13506,6 +14882,38 @@ func (m *BasicAuthBitbucketServer) Size() (n int) { return n } +func (m *BearerTokenBitbucketCloud) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TokenRef != nil { + l = m.TokenRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ChartDetails) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Home) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Maintainers) > 0 { + for _, s := range m.Maintainers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Cluster) Size() (n int) { if m == nil { return 0 @@ -13696,6 +15104,12 @@ func (m *ComparedTo) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.IgnoreDifferences) > 0 { + for _, e := range m.IgnoreDifferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -13871,6 +15285,14 @@ func (m *GitGenerator) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.PathParamPrefix) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for k, v := range m.Values { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 
1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -14094,6 +15516,19 @@ func (m *KustomizeOptions) Size() (n int) { return n } +func (m *KustomizeReplica) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Count.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ListGenerator) Size() (n int) { if m == nil { return 0 @@ -14108,6 +15543,8 @@ func (m *ListGenerator) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ElementsYaml) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14273,6 +15710,38 @@ func (m *OperationState) Size() (n int) { return n } +func (m *OptionalArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Array) > 0 { + for _, s := range m.Array { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OptionalMap) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Map) > 0 { + for k, v := range m.Map { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *OrphanedResourceKey) Size() (n int) { if m == nil { return 0 @@ -14333,6 +15802,61 @@ func (m *OverrideIgnoreDiff) Size() (n int) { return n } +func (m *PluginConfigMapRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PluginGenerator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Input.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RequeueAfterSeconds != nil { + n += 1 + sovGenerated(uint64(*m.RequeueAfterSeconds)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for k, v := range m.Values { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PluginInput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *ProjectRole) Size() (n int) { if m == nil { return 0 @@ -14397,6 +15921,64 @@ func (m *PullRequestGenerator) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.Bitbucket != nil { + l = m.Bitbucket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDevOps != nil { + l = m.AzureDevOps.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PullRequestGeneratorAzureDevOps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Organization) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Project) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Repo) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.API) + n += 1 + l + sovGenerated(uint64(l)) + if m.TokenRef != nil { + l = m.TokenRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Labels) > 0 { + for _, s := range m.Labels { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PullRequestGeneratorBitbucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Repo) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.API) + n += 1 + l + sovGenerated(uint64(l)) + if m.BasicAuth != nil { + l = m.BasicAuth.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BearerToken != nil { + l = m.BearerToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -14429,6 +16011,10 @@ func (m *PullRequestGeneratorFilter) Size() (n int) { l = len(*m.BranchMatch) n += 1 + l + sovGenerated(uint64(l)) } + if m.TargetBranchMatch != nil { + l = len(*m.TargetBranchMatch) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -14454,6 +16040,7 @@ func (m *PullRequestGeneratorGitLab) Size() (n int) { } l = len(m.PullRequestState) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -14686,6 +16273,10 @@ func (m *ResourceAction) Size() (n int) { } } n += 2 + l = len(m.IconClass) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14907,6 +16498,8 @@ func (m *ResourceOverride) Size() (n int) { } } n += 2 + l = m.IgnoreResourceUpdates.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -15100,6 +16693,38 @@ func (m *SCMProviderGenerator) Size() (n int) { } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for k, v := range m.Values { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AWSCodeCommit != nil { + l = m.AWSCodeCommit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SCMProviderGeneratorAWSCodeCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TagFilters) > 0 { + for _, e := range m.TagFilters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -15247,6 +16872,7 @@ func (m *SCMProviderGeneratorGitlab) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } n += 2 + n += 2 return n } @@ -15370,6 +16996,10 @@ func (m *SyncOperationResult) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.ManagedNamespaceMetadata != nil { + l = m.ManagedNamespaceMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -15531,6 +17161,19 @@ func (m *TLSClientConfig) Size() (n int) { return n } +func (m *TagFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -15723,6 +17366,16 @@ func (this *ApplicationMatchExpression) String() string { }, "") return s } +func (this *ApplicationPreservedFields) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ApplicationPreservedFields{`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `}`, + }, "") + return s +} func (this *ApplicationSet) String() string { if this == nil { return "nil" @@ -15777,6 +17430,7 @@ func (this *ApplicationSetGenerator) String() string { `Matrix:` + strings.Replace(this.Matrix.String(), "MatrixGenerator", 
"MatrixGenerator", 1) + `,`, `Merge:` + strings.Replace(this.Merge.String(), "MergeGenerator", "MergeGenerator", 1) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Plugin:` + strings.Replace(this.Plugin.String(), "PluginGenerator", "PluginGenerator", 1) + `,`, `}`, }, "") return s @@ -15811,6 +17465,7 @@ func (this *ApplicationSetNestedGenerator) String() string { `Matrix:` + strings.Replace(fmt.Sprintf("%v", this.Matrix), "JSON", "v11.JSON", 1) + `,`, `Merge:` + strings.Replace(fmt.Sprintf("%v", this.Merge), "JSON", "v11.JSON", 1) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Plugin:` + strings.Replace(this.Plugin.String(), "PluginGenerator", "PluginGenerator", 1) + `,`, `}`, }, "") return s @@ -15861,6 +17516,9 @@ func (this *ApplicationSetSpec) String() string { `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, `SyncPolicy:` + strings.Replace(this.SyncPolicy.String(), "ApplicationSetSyncPolicy", "ApplicationSetSyncPolicy", 1) + `,`, `Strategy:` + strings.Replace(this.Strategy.String(), "ApplicationSetStrategy", "ApplicationSetStrategy", 1) + `,`, + `PreservedFields:` + strings.Replace(this.PreservedFields.String(), "ApplicationPreservedFields", "ApplicationPreservedFields", 1) + `,`, + `GoTemplateOptions:` + fmt.Sprintf("%v", this.GoTemplateOptions) + `,`, + `ApplyNestedSelectors:` + fmt.Sprintf("%v", this.ApplyNestedSelectors) + `,`, `}`, }, "") return s @@ -15903,6 +17561,7 @@ func (this *ApplicationSetSyncPolicy) String() string { } s := strings.Join([]string{`&ApplicationSetSyncPolicy{`, `PreserveResourcesOnDeletion:` + fmt.Sprintf("%v", this.PreserveResourcesOnDeletion) + `,`, + `ApplicationsSync:` + valueToStringGenerated(this.ApplicationsSync) + `,`, `}`, }, "") return s @@ -15963,6 +17622,8 @@ func (this *ApplicationSetTerminalGenerator) String() string { `SCMProvider:` + strings.Replace(this.SCMProvider.String(), "SCMProviderGenerator", "SCMProviderGenerator", 1) + `,`, `ClusterDecisionResource:` + strings.Replace(this.ClusterDecisionResource.String(), "DuckTypeGenerator", "DuckTypeGenerator", 1) + `,`, `PullRequest:` + strings.Replace(this.PullRequest.String(), "PullRequestGenerator", "PullRequestGenerator", 1) + `,`, + `Plugin:` + strings.Replace(this.Plugin.String(), "PluginGenerator", "PluginGenerator", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -16022,6 +17683,7 @@ func (this *ApplicationSourceHelm) String() string { `PassCredentials:` + fmt.Sprintf("%v", this.PassCredentials) + `,`, `IgnoreMissingValueFiles:` + fmt.Sprintf("%v", this.IgnoreMissingValueFiles) + `,`, `SkipCrds:` + fmt.Sprintf("%v", this.SkipCrds) + `,`, + `ValuesObject:` + strings.Replace(fmt.Sprintf("%v", this.ValuesObject), "RawExtension", "runtime.RawExtension", 1) + `,`, `}`, }, "") return s @@ -16052,6 +17714,11 @@ func (this *ApplicationSourceKustomize) String() string { if this == nil { return "nil" } + repeatedStringForReplicas := "[]KustomizeReplica{" + for _, f := range this.Replicas { + repeatedStringForReplicas += strings.Replace(strings.Replace(f.String(), "KustomizeReplica", "KustomizeReplica", 1), `&`, ``, 1) + "," + } + repeatedStringForReplicas += "}" keysForCommonLabels := make([]string, 0, len(this.CommonLabels)) for k := range this.CommonLabels { 
keysForCommonLabels = append(keysForCommonLabels, k) @@ -16081,6 +17748,9 @@ func (this *ApplicationSourceKustomize) String() string { `CommonAnnotations:` + mapStringForCommonAnnotations + `,`, `ForceCommonLabels:` + fmt.Sprintf("%v", this.ForceCommonLabels) + `,`, `ForceCommonAnnotations:` + fmt.Sprintf("%v", this.ForceCommonAnnotations) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `CommonAnnotationsEnvsubst:` + fmt.Sprintf("%v", this.CommonAnnotationsEnvsubst) + `,`, + `Replicas:` + repeatedStringForReplicas + `,`, `}`, }, "") return s @@ -16111,20 +17781,10 @@ func (this *ApplicationSourcePluginParameter) String() string { if this == nil { return "nil" } - keysForMap := make([]string, 0, len(this.Map)) - for k := range this.Map { - keysForMap = append(keysForMap, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - mapStringForMap := "map[string]string{" - for _, k := range keysForMap { - mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) - } - mapStringForMap += "}" s := strings.Join([]string{`&ApplicationSourcePluginParameter{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Map:` + mapStringForMap + `,`, - `Array:` + fmt.Sprintf("%v", this.Array) + `,`, + `OptionalMap:` + strings.Replace(this.OptionalMap.String(), "OptionalMap", "OptionalMap", 1) + `,`, + `OptionalArray:` + strings.Replace(this.OptionalArray.String(), "OptionalArray", "OptionalArray", 1) + `,`, `String_:` + valueToStringGenerated(this.String_) + `,`, `}`, }, "") @@ -16194,6 +17854,7 @@ func (this *ApplicationStatus) String() string { `Summary:` + strings.Replace(strings.Replace(this.Summary.String(), "ApplicationSummary", "ApplicationSummary", 1), `&`, ``, 1) + `,`, `ResourceHealthSource:` + fmt.Sprintf("%v", this.ResourceHealthSource) + `,`, `SourceTypes:` + fmt.Sprintf("%v", this.SourceTypes) + `,`, + `ControllerNamespace:` + fmt.Sprintf("%v", this.ControllerNamespace) + `,`, `}`, }, "") return s @@ -16270,6 +17931,28 @@ func (this *BasicAuthBitbucketServer) String() string { }, "") return s } +func (this *BearerTokenBitbucketCloud) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BearerTokenBitbucketCloud{`, + `TokenRef:` + strings.Replace(this.TokenRef.String(), "SecretRef", "SecretRef", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ChartDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ChartDetails{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Home:` + fmt.Sprintf("%v", this.Home) + `,`, + `Maintainers:` + fmt.Sprintf("%v", this.Maintainers) + `,`, + `}`, + }, "") + return s +} func (this *Cluster) String() string { if this == nil { return "nil" @@ -16411,10 +18094,16 @@ func (this *ComparedTo) String() string { repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "ApplicationSource", "ApplicationSource", 1), `&`, ``, 1) + "," } repeatedStringForSources += "}" + repeatedStringForIgnoreDifferences := "[]ResourceIgnoreDifferences{" + for _, f := range this.IgnoreDifferences { + repeatedStringForIgnoreDifferences += strings.Replace(strings.Replace(f.String(), "ResourceIgnoreDifferences", "ResourceIgnoreDifferences", 1), `&`, ``, 1) + "," + } + repeatedStringForIgnoreDifferences += "}" s := strings.Join([]string{`&ComparedTo{`, `Source:` + strings.Replace(strings.Replace(this.Source.String(), "ApplicationSource", "ApplicationSource", 1), `&`, ``, 1) + `,`, `Destination:` + strings.Replace(strings.Replace(this.Destination.String(), 
"ApplicationDestination", "ApplicationDestination", 1), `&`, ``, 1) + `,`, `Sources:` + repeatedStringForSources + `,`, + `IgnoreDifferences:` + repeatedStringForIgnoreDifferences + `,`, `}`, }, "") return s @@ -16551,6 +18240,16 @@ func (this *GitGenerator) String() string { repeatedStringForFiles += strings.Replace(strings.Replace(f.String(), "GitFileGeneratorItem", "GitFileGeneratorItem", 1), `&`, ``, 1) + "," } repeatedStringForFiles += "}" + keysForValues := make([]string, 0, len(this.Values)) + for k := range this.Values { + keysForValues = append(keysForValues, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForValues) + mapStringForValues := "map[string]string{" + for _, k := range keysForValues { + mapStringForValues += fmt.Sprintf("%v: %v,", k, this.Values[k]) + } + mapStringForValues += "}" s := strings.Join([]string{`&GitGenerator{`, `RepoURL:` + fmt.Sprintf("%v", this.RepoURL) + `,`, `Directories:` + repeatedStringForDirectories + `,`, @@ -16559,6 +18258,7 @@ func (this *GitGenerator) String() string { `RequeueAfterSeconds:` + valueToStringGenerated(this.RequeueAfterSeconds) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, `PathParamPrefix:` + fmt.Sprintf("%v", this.PathParamPrefix) + `,`, + `Values:` + mapStringForValues + `,`, `}`, }, "") return s @@ -16751,6 +18451,17 @@ func (this *KustomizeOptions) String() string { }, "") return s } +func (this *KustomizeReplica) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KustomizeReplica{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Count:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Count), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ListGenerator) String() string { if this == nil { return "nil" @@ -16763,6 +18474,7 @@ func (this *ListGenerator) String() string { s := strings.Join([]string{`&ListGenerator{`, `Elements:` + repeatedStringForElements + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, + `ElementsYaml:` + fmt.Sprintf("%v", this.ElementsYaml) + `,`, `}`, }, "") return s @@ -16907,6 +18619,36 @@ func (this *OperationState) String() string { }, "") return s } +func (this *OptionalArray) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OptionalArray{`, + `Array:` + fmt.Sprintf("%v", this.Array) + `,`, + `}`, + }, "") + return s +} +func (this *OptionalMap) String() string { + if this == nil { + return "nil" + } + keysForMap := make([]string, 0, len(this.Map)) + for k := range this.Map { + keysForMap = append(keysForMap, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMap) + mapStringForMap := "map[string]string{" + for _, k := range keysForMap { + mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) + } + mapStringForMap += "}" + s := strings.Join([]string{`&OptionalMap{`, + `Map:` + mapStringForMap + `,`, + `}`, + }, "") + return s +} func (this *OrphanedResourceKey) String() string { if this == nil { return "nil" @@ -16947,6 +18689,60 @@ func (this *OverrideIgnoreDiff) String() string { }, "") return s } +func (this *PluginConfigMapRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginConfigMapRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this 
*PluginGenerator) String() string { + if this == nil { + return "nil" + } + keysForValues := make([]string, 0, len(this.Values)) + for k := range this.Values { + keysForValues = append(keysForValues, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForValues) + mapStringForValues := "map[string]string{" + for _, k := range keysForValues { + mapStringForValues += fmt.Sprintf("%v: %v,", k, this.Values[k]) + } + mapStringForValues += "}" + s := strings.Join([]string{`&PluginGenerator{`, + `ConfigMapRef:` + strings.Replace(strings.Replace(this.ConfigMapRef.String(), "PluginConfigMapRef", "PluginConfigMapRef", 1), `&`, ``, 1) + `,`, + `Input:` + strings.Replace(strings.Replace(this.Input.String(), "PluginInput", "PluginInput", 1), `&`, ``, 1) + `,`, + `RequeueAfterSeconds:` + valueToStringGenerated(this.RequeueAfterSeconds) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, + `Values:` + mapStringForValues + `,`, + `}`, + }, "") + return s +} +func (this *PluginInput) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "PluginParameters{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&PluginInput{`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} func (this *ProjectRole) String() string { if this == nil { return "nil" @@ -16983,6 +18779,37 @@ func (this *PullRequestGenerator) String() string { `Filters:` + repeatedStringForFilters + `,`, `RequeueAfterSeconds:` + valueToStringGenerated(this.RequeueAfterSeconds) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, + `Bitbucket:` + strings.Replace(this.Bitbucket.String(), "PullRequestGeneratorBitbucket", "PullRequestGeneratorBitbucket", 1) + `,`, + `AzureDevOps:` + strings.Replace(this.AzureDevOps.String(), "PullRequestGeneratorAzureDevOps", "PullRequestGeneratorAzureDevOps", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PullRequestGeneratorAzureDevOps) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PullRequestGeneratorAzureDevOps{`, + `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`, + `Project:` + fmt.Sprintf("%v", this.Project) + `,`, + `Repo:` + fmt.Sprintf("%v", this.Repo) + `,`, + `API:` + fmt.Sprintf("%v", this.API) + `,`, + `TokenRef:` + strings.Replace(this.TokenRef.String(), "SecretRef", "SecretRef", 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} +func (this *PullRequestGeneratorBitbucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PullRequestGeneratorBitbucket{`, + `Owner:` + fmt.Sprintf("%v", this.Owner) + `,`, + `Repo:` + fmt.Sprintf("%v", this.Repo) + `,`, + `API:` + fmt.Sprintf("%v", this.API) + `,`, + `BasicAuth:` + strings.Replace(this.BasicAuth.String(), "BasicAuthBitbucketServer", "BasicAuthBitbucketServer", 1) + `,`, + `BearerToken:` + strings.Replace(this.BearerToken.String(), "BearerTokenBitbucketCloud", "BearerTokenBitbucketCloud", 1) + `,`, `}`, }, "") return s @@ 
-17006,6 +18833,7 @@ func (this *PullRequestGeneratorFilter) String() string { } s := strings.Join([]string{`&PullRequestGeneratorFilter{`, `BranchMatch:` + valueToStringGenerated(this.BranchMatch) + `,`, + `TargetBranchMatch:` + valueToStringGenerated(this.TargetBranchMatch) + `,`, `}`, }, "") return s @@ -17020,6 +18848,7 @@ func (this *PullRequestGeneratorGitLab) String() string { `TokenRef:` + strings.Replace(this.TokenRef.String(), "SecretRef", "SecretRef", 1) + `,`, `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `PullRequestState:` + fmt.Sprintf("%v", this.PullRequestState) + `,`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, `}`, }, "") return s @@ -17195,6 +19024,8 @@ func (this *ResourceAction) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Params:` + repeatedStringForParams + `,`, `Disabled:` + fmt.Sprintf("%v", this.Disabled) + `,`, + `IconClass:` + fmt.Sprintf("%v", this.IconClass) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, `}`, }, "") return s @@ -17362,6 +19193,7 @@ func (this *ResourceOverride) String() string { `Actions:` + fmt.Sprintf("%v", this.Actions) + `,`, `KnownTypeFields:` + repeatedStringForKnownTypeFields + `,`, `UseOpenLibs:` + fmt.Sprintf("%v", this.UseOpenLibs) + `,`, + `IgnoreResourceUpdates:` + strings.Replace(strings.Replace(this.IgnoreResourceUpdates.String(), "OverrideIgnoreDiff", "OverrideIgnoreDiff", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -17474,6 +19306,16 @@ func (this *SCMProviderGenerator) String() string { repeatedStringForFilters += strings.Replace(strings.Replace(f.String(), "SCMProviderGeneratorFilter", "SCMProviderGeneratorFilter", 1), `&`, ``, 1) + "," } repeatedStringForFilters += "}" + keysForValues := make([]string, 0, len(this.Values)) + for k := range this.Values { + keysForValues = append(keysForValues, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForValues) + mapStringForValues := "map[string]string{" + for _, k := range keysForValues { + mapStringForValues += fmt.Sprintf("%v: %v,", k, this.Values[k]) + } + mapStringForValues += "}" s := strings.Join([]string{`&SCMProviderGenerator{`, `Github:` + strings.Replace(this.Github.String(), "SCMProviderGeneratorGithub", "SCMProviderGeneratorGithub", 1) + `,`, `Gitlab:` + strings.Replace(this.Gitlab.String(), "SCMProviderGeneratorGitlab", "SCMProviderGeneratorGitlab", 1) + `,`, @@ -17485,6 +19327,26 @@ func (this *SCMProviderGenerator) String() string { `CloneProtocol:` + fmt.Sprintf("%v", this.CloneProtocol) + `,`, `RequeueAfterSeconds:` + valueToStringGenerated(this.RequeueAfterSeconds) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, + `Values:` + mapStringForValues + `,`, + `AWSCodeCommit:` + strings.Replace(this.AWSCodeCommit.String(), "SCMProviderGeneratorAWSCodeCommit", "SCMProviderGeneratorAWSCodeCommit", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SCMProviderGeneratorAWSCodeCommit) String() string { + if this == nil { + return "nil" + } + repeatedStringForTagFilters := "[]*TagFilter{" + for _, f := range this.TagFilters { + repeatedStringForTagFilters += strings.Replace(f.String(), "TagFilter", "TagFilter", 1) + "," + } + repeatedStringForTagFilters += "}" + s := strings.Join([]string{`&SCMProviderGeneratorAWSCodeCommit{`, + `TagFilters:` + repeatedStringForTagFilters + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `AllBranches:` + 
fmt.Sprintf("%v", this.AllBranches) + `,`, `}`, }, "") return s @@ -17581,6 +19443,7 @@ func (this *SCMProviderGeneratorGitlab) String() string { `API:` + fmt.Sprintf("%v", this.API) + `,`, `TokenRef:` + strings.Replace(this.TokenRef.String(), "SecretRef", "SecretRef", 1) + `,`, `AllBranches:` + fmt.Sprintf("%v", this.AllBranches) + `,`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, `}`, }, "") return s @@ -17668,6 +19531,7 @@ func (this *SyncOperationResult) String() string { `Source:` + strings.Replace(strings.Replace(this.Source.String(), "ApplicationSource", "ApplicationSource", 1), `&`, ``, 1) + `,`, `Sources:` + repeatedStringForSources + `,`, `Revisions:` + fmt.Sprintf("%v", this.Revisions) + `,`, + `ManagedNamespaceMetadata:` + strings.Replace(this.ManagedNamespaceMetadata.String(), "ManagedNamespaceMetadata", "ManagedNamespaceMetadata", 1) + `,`, `}`, }, "") return s @@ -17772,6 +19636,17 @@ func (this *TLSClientConfig) String() string { }, "") return s } +func (this *TagFilter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagFilter{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -19557,6 +21432,88 @@ func (m *ApplicationMatchExpression) Unmarshal(dAtA []byte) error { } return nil } +func (m *ApplicationPreservedFields) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationPreservedFields: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationPreservedFields: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotations = append(m.Annotations, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ApplicationSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -20487,6 +22444,42 @@ func (m *ApplicationSetGenerator) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugin", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plugin == nil { + m.Plugin = &PluginGenerator{} + } + if err := m.Plugin.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20978,6 +22971,42 @@ func (m *ApplicationSetNestedGenerator) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugin", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plugin == nil { + m.Plugin = &PluginGenerator{} + } + if err := m.Plugin.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21391,59 +23420,147 @@ func (m *ApplicationSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplicationSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreservedFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreservedFields == nil { + m.PreservedFields = &ApplicationPreservedFields{} + } + if err := 
m.PreservedFields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GoTemplateOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GoTemplateOptions = append(m.GoTemplateOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplyNestedSelectors", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ApplyNestedSelectors = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21697,6 +23814,39 @@ func (m *ApplicationSetSyncPolicy) Unmarshal(dAtA []byte) error { } } m.PreserveResourcesOnDeletion = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationsSync", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ApplicationsSyncPolicy(dAtA[iNdEx:postIndex]) + m.ApplicationsSync = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22479,6 +24629,78 @@ func (m *ApplicationSetTerminalGenerator) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 
7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugin", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plugin == nil { + m.Plugin = &PluginGenerator{} + } + if err := m.Plugin.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23306,6 +25528,42 @@ func (m *ApplicationSourceHelm) Unmarshal(dAtA []byte) error { } } m.SkipCrds = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValuesObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValuesObject == nil { + m.ValuesObject = &runtime.RawExtension{} + } + if err := m.ValuesObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23928,6 +26186,92 @@ func (m *ApplicationSourceKustomize) Unmarshal(dAtA []byte) error { } } m.ForceCommonAnnotations = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonAnnotationsEnvsubst", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CommonAnnotationsEnvsubst = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Replicas = append(m.Replicas, KustomizeReplica{}) + if err := m.Replicas[len(m.Replicas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -24162,7 +26506,7 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OptionalMap", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24189,109 +26533,18 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Map == nil { - m.Map = make(map[string]string) + if m.OptionalMap == nil { + m.OptionalMap = &OptionalMap{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.OptionalMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Map[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Array", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OptionalArray", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24301,23 +26554,27 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Array = append(m.Array, string(dAtA[iNdEx:postIndex])) + if m.OptionalArray == nil { + m.OptionalArray = &OptionalArray{} + } + if err := m.OptionalArray.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 5: if wireType != 2 { @@ -25116,91 +27373,9 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { } m.SourceTypes = append(m.SourceTypes, ApplicationSourceType(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplicationSummary: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSummary: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalURLs = append(m.ExternalURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
ControllerNamespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25228,7 +27403,121 @@ func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) + m.ControllerNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationSummary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationSummary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalURLs = append(m.ExternalURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -25770,7 +28059,7 @@ func (m *BasicAuthBitbucketServer) Unmarshal(dAtA []byte) error { } return nil } -func (m *Cluster) Unmarshal(dAtA []byte) error { +func (m *BearerTokenBitbucketCloud) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25793,79 +28082,15 @@ func (m *Cluster) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + return fmt.Errorf("proto: BearerTokenBitbucketCloud: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + 
return fmt.Errorf("proto: BearerTokenBitbucketCloud: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Server = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TokenRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25892,46 +28117,342 @@ func (m *Cluster) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TokenRef == nil { + m.TokenRef = &SecretRef{} + } + if err := m.TokenRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionState", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChartDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - if err := m.ConnectionState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 5: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChartDetails: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChartDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Home", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Home = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Maintainers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Maintainers = append(m.Maintainers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27586,6 +30107,40 @@ func (m *ComparedTo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreDifferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IgnoreDifferences = append(m.IgnoreDifferences, ResourceIgnoreDifferences{}) + if err := 
m.IgnoreDifferences[len(m.IgnoreDifferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29250,6 +31805,133 @@ func (m *GitGenerator) Unmarshal(dAtA []byte) error { } m.PathParamPrefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Values == nil { + m.Values = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Values[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31156,6 +33838,121 @@ func (m *KustomizeOptions) Unmarshal(dAtA []byte) error { } return nil } +func (m *KustomizeReplica) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KustomizeReplica: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KustomizeReplica: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ListGenerator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -31252,6 +34049,38 @@ func (m *ListGenerator) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ElementsYaml", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ElementsYaml = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32602,7 +35431,7 @@ func (m *OperationState) Unmarshal(dAtA []byte) error { } return nil } -func (m *OrphanedResourceKey) Unmarshal(dAtA []byte) error { +func (m *OptionalArray) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32625,15 +35454,15 @@ func (m *OrphanedResourceKey) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
OrphanedResourceKey: wiretype end group for non-group") + return fmt.Errorf("proto: OptionalArray: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OrphanedResourceKey: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OptionalArray: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Array", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32661,11 +35490,270 @@ func (m *OrphanedResourceKey) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + m.Array = append(m.Array, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Map == nil { + m.Map = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Map[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OrphanedResourceKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrphanedResourceKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrphanedResourceKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32999,6 +36087,563 @@ func (m *OverrideIgnoreDiff) Unmarshal(dAtA []byte) error { } return nil } +func (m *PluginConfigMapRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginConfigMapRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginConfigMapRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginGenerator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginGenerator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginGenerator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Input.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequeueAfterSeconds", wireType) + } + var v int64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequeueAfterSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Values == nil { + m.Values = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Values[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(PluginParameters) + } + var mapkey string + mapvalue := &v11.JSON{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v11.JSON{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = 
postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ProjectRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -33377,18 +37022,391 @@ func (m *PullRequestGenerator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BitbucketServer == nil { - m.BitbucketServer = &PullRequestGeneratorBitbucketServer{} + if m.BitbucketServer == nil { + m.BitbucketServer = &PullRequestGeneratorBitbucketServer{} + } + if err := m.BitbucketServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, PullRequestGeneratorFilter{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequeueAfterSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequeueAfterSeconds = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bitbucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Bitbucket == nil { + m.Bitbucket = &PullRequestGeneratorBitbucket{} + } + if err := m.Bitbucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDevOps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDevOps == nil { + m.AzureDevOps = &PullRequestGeneratorAzureDevOps{} + } + if err := m.AzureDevOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PullRequestGeneratorAzureDevOps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PullRequestGeneratorAzureDevOps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullRequestGeneratorAzureDevOps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Organization = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Project = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field API", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.API = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TokenRef == nil { + m.TokenRef = &SecretRef{} } - if err := m.BitbucketServer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TokenRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33398,31 +37416,111 @@ func (m *PullRequestGenerator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Filters = append(m.Filters, PullRequestGeneratorFilter{}) - if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Labels = append(m.Labels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*PullRequestGeneratorBitbucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PullRequestGeneratorBitbucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullRequestGeneratorBitbucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequeueAfterSeconds", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) } - var v int64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33432,15 +37530,59 @@ func (m *PullRequestGenerator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.RequeueAfterSeconds = &v - case 7: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field API", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.API = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BasicAuth", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33467,7 +37609,46 @@ func (m *PullRequestGenerator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.BasicAuth == nil { + m.BasicAuth = &BasicAuthBitbucketServer{} + } + if err := 
m.BasicAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BearerToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BearerToken == nil { + m.BearerToken = &BearerTokenBitbucketCloud{} + } + if err := m.BearerToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -33736,6 +37917,39 @@ func (m *PullRequestGeneratorFilter) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.BranchMatch = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetBranchMatch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TargetBranchMatch = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33916,13 +38130,45 @@ func (m *PullRequestGeneratorGitLab) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, string(dAtA[iNdEx:postIndex])) + m.Labels = append(m.Labels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullRequestState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PullRequestState = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PullRequestState", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33932,24 +38178,12 @@ func (m *PullRequestGeneratorGitLab) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PullRequestState = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Insecure = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36032,7 +40266,241 @@ func (m *RepositoryCertificate) Unmarshal(dAtA []byte) error { } return nil } -func (m *RepositoryCertificateList) Unmarshal(dAtA []byte) error { +func (m *RepositoryCertificateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryCertificateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryCertificateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RepositoryCertificate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryList: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &Repository{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAction) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36055,17 +40523,17 @@ func (m *RepositoryCertificateList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RepositoryCertificateList: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceAction: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RepositoryCertificateList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceAction: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36075,112 +40543,27 @@ func (m *RepositoryCertificateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RepositoryCertificate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RepositoryList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RepositoryList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RepositoryList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36207,15 +40590,16 @@ func (m *RepositoryList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Params = append(m.Params, ResourceActionParam{}) + if err := m.Params[len(m.Params)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36225,79 +40609,15 @@ func (m *RepositoryList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &Repository{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Disabled = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IconClass", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36325,13 +40645,13 @@ func (m *ResourceAction) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.IconClass = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36341,46 +40661,24 @@ func (m *ResourceAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Params = append(m.Params, ResourceActionParam{}) - if err := m.Params[len(m.Params)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.DisplayName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -38401,6 +42699,39 @@ func (m *ResourceOverride) Unmarshal(dAtA []byte) error { } } m.UseOpenLibs = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreResourceUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IgnoreResourceUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40194,16 +44525,348 @@ func (m *SCMProviderGenerator) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AzureDevOps == nil { - m.AzureDevOps = &SCMProviderGeneratorAzureDevOps{} + if m.AzureDevOps == nil { + m.AzureDevOps = &SCMProviderGeneratorAzureDevOps{} + } + if err := m.AzureDevOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, SCMProviderGeneratorFilter{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloneProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloneProtocol = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequeueAfterSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequeueAfterSeconds = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Values == nil { + m.Values = make(map[string]string) + } + var mapkey 
string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Values[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSCodeCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSCodeCommit == nil { + m.AWSCodeCommit = &SCMProviderGeneratorAWSCodeCommit{} } - if err := m.AzureDevOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AWSCodeCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SCMProviderGeneratorAWSCodeCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SCMProviderGeneratorAWSCodeCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SCMProviderGeneratorAWSCodeCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TagFilters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40230,14 +44893,14 @@ func (m *SCMProviderGenerator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filters = append(m.Filters, SCMProviderGeneratorFilter{}) - if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TagFilters = append(m.TagFilters, &TagFilter{}) + if err := m.TagFilters[len(m.TagFilters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CloneProtocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40265,13 +44928,13 @@ func (m *SCMProviderGenerator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CloneProtocol = string(dAtA[iNdEx:postIndex]) + m.Role = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequeueAfterSeconds", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) } - var v int64 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40281,17 +44944,29 @@ func (m *SCMProviderGenerator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.RequeueAfterSeconds = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var msglen int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllBranches", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40301,25 +44976,12 @@ func (m *SCMProviderGenerator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.AllBranches = bool(v != 0) default: iNdEx = preIndex skippy, err := 
skipGenerated(dAtA[iNdEx:]) @@ -41657,6 +46319,26 @@ func (m *SCMProviderGeneratorGitlab) Unmarshal(dAtA []byte) error { } } m.AllBranches = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Insecure = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42355,13 +47037,161 @@ func (m *SyncOperationResource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SyncOperationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SyncOperationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SyncOperationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &ResourceResult{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -42371,77 +47201,28 @@ func (m *SyncOperationResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SyncOperationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SyncOperationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SyncOperationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -42468,14 +47249,14 @@ func (m *SyncOperationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, &ResourceResult{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Sources = append(m.Sources, ApplicationSource{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Revisions", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -42503,11 +47284,11 @@ func (m *SyncOperationResult) Unmarshal(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Revision = string(dAtA[iNdEx:postIndex]) + m.Revisions = append(m.Revisions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedNamespaceMetadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -42534,76 +47315,13 @@ func (m *SyncOperationResult) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.ManagedNamespaceMetadata == nil { + m.ManagedNamespaceMetadata = &ManagedNamespaceMetadata{} } - m.Sources = append(m.Sources, ApplicationSource{}) - if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ManagedNamespaceMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revisions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Revisions = append(m.Revisions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -43877,6 +48595,120 @@ func (m *TLSClientConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *TagFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto index 729f684b2d..ee76a7585f 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto @@ -116,7 +116,7 @@ message Application { optional Operation operation = 4; } -// ApplicationCondition contains details about an application condition, which is usally an error or warning +// ApplicationCondition contains details about an application condition, which is usually an error or warning message ApplicationCondition { // Type is an application condition type optional string type = 1; @@ -157,6 +157,10 @@ message ApplicationMatchExpression { repeated string values = 3; } +message ApplicationPreservedFields { + repeated string annotations = 1; +} + // ApplicationSet is a set of Application resources // +genclient // +genclient:noStatus @@ -227,6 +231,8 @@ message ApplicationSetGenerator { // Selector allows to post-filter all generator. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 9; + + optional PluginGenerator plugin = 10; } // ApplicationSetList contains a list of ApplicationSet @@ -261,6 +267,8 @@ message ApplicationSetNestedGenerator { // Selector allows to post-filter all generator. 
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 9; + + optional PluginGenerator plugin = 10; } message ApplicationSetRolloutStep { @@ -284,6 +292,13 @@ message ApplicationSetSpec { optional ApplicationSetSyncPolicy syncPolicy = 4; optional ApplicationSetStrategy strategy = 5; + + optional ApplicationPreservedFields preservedFields = 6; + + repeated string goTemplateOptions = 7; + + // ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators + optional bool applyNestedSelectors = 8; } // ApplicationSetStatus defines the observed state of ApplicationSet @@ -307,6 +322,11 @@ message ApplicationSetStrategy { message ApplicationSetSyncPolicy { // PreserveResourcesOnDeletion will preserve resources on deletion. If PreserveResourcesOnDeletion is set to true, these Applications will not be deleted. optional bool preserveResourcesOnDeletion = 1; + + // ApplicationsSync represents the policy applied on the generated applications. Possible values are create-only, create-update, create-delete, sync + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=create-only;create-update;create-delete;sync + optional string applicationsSync = 2; } // ApplicationSetTemplate represents argocd ApplicationSpec @@ -346,6 +366,11 @@ message ApplicationSetTerminalGenerator { optional DuckTypeGenerator clusterDecisionResource = 5; optional PullRequestGenerator pullRequest = 6; + + optional PluginGenerator plugin = 7; + + // Selector allows to post-filter all generator. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 8; } // ApplicationSource contains all required information about the source of an application @@ -406,7 +431,8 @@ message ApplicationSourceHelm { // ReleaseName is the Helm release name to use. If omitted it will use the application name optional string releaseName = 3; - // Values specifies Helm values to be passed to helm template, typically defined as a block + // Values specifies Helm values to be passed to helm template, typically defined as a block. ValuesObject takes precedence over Values, so use one or the other. + // +patchStrategy=replace optional string values = 4; // FileParameters are file parameters to the helm template @@ -423,6 +449,10 @@ message ApplicationSourceHelm { // SkipCrds skips custom resource definition installation step (Helm's --skip-crds) optional bool skipCrds = 9; + + // ValuesObject specifies Helm values to be passed to helm template, defined as a map. This takes precedence over Values. 
+ // +kubebuilder:pruning:PreserveUnknownFields + optional k8s.io.apimachinery.pkg.runtime.RawExtension valuesObject = 10; } // ApplicationSourceJsonnet holds options specific to applications of type Jsonnet @@ -462,6 +492,15 @@ message ApplicationSourceKustomize { // ForceCommonAnnotations specifies whether to force applying common annotations to resources for Kustomize apps optional bool forceCommonAnnotations = 8; + + // Namespace sets the namespace that Kustomize adds to all resources + optional string namespace = 9; + + // CommonAnnotationsEnvsubst specifies whether to apply env variables substitution for annotation values + optional bool commonAnnotationsEnvsubst = 10; + + // Replicas is a list of Kustomize Replicas override specifications + repeated KustomizeReplica replicas = 11; } // ApplicationSourcePlugin holds options specific to config management plugins @@ -481,10 +520,10 @@ message ApplicationSourcePluginParameter { optional string string = 5; // Map is the value of a map type parameter. - map map = 3; + optional OptionalMap map = 3; // Array is the value of an array type parameter. - repeated string array = 4; + optional OptionalArray array = 4; } // ApplicationSpec represents desired application state. Contains link to repository with application definition and additional parameters link definition revision. @@ -557,6 +596,9 @@ message ApplicationStatus { // SourceTypes specifies the type of the sources included in the application repeated string sourceTypes = 12; + + // ControllerNamespace indicates the namespace in which the application controller is located + optional string controllerNamespace = 13; } // ApplicationSummary contains information about URLs and container images used by an application @@ -614,6 +656,23 @@ message BasicAuthBitbucketServer { optional SecretRef passwordRef = 2; } +// BearerTokenBitbucketCloud defines the Bearer token for BitBucket AppToken auth. +message BearerTokenBitbucketCloud { + // Password (or personal access token) reference. + optional SecretRef tokenRef = 1; +} + +// ChartDetails contains helm chart metadata for a specific version +message ChartDetails { + optional string description = 1; + + // The URL of this projects home page, e.g. "http://example.com" + optional string home = 2; + + // List of maintainer details, name and email, e.g. 
["John Doe "] + repeated string maintainers = 3; +} + // Cluster is the definition of a cluster resource message Cluster { // Server is the API server URL of the Kubernetes cluster @@ -748,6 +807,9 @@ message ComparedTo { // Sources is a reference to the application's multiple sources used for comparison repeated ApplicationSource sources = 3; + + // IgnoreDifferences is a reference to the application's ignored differences used for comparison + repeated ResourceIgnoreDifferences ignoreDifferences = 4; } // ComponentParameter contains information about component parameter value @@ -854,6 +916,9 @@ message GitGenerator { optional ApplicationSetTemplate template = 6; optional string pathParamPrefix = 7; + + // Values contains key/value pairs which are passed directly as parameters to the template + map values = 8; } // GnuPGPublicKey is a representation of a GnuPG public key @@ -997,11 +1062,21 @@ message KustomizeOptions { optional string binaryPath = 2; } +message KustomizeReplica { + // Name of Deployment or StatefulSet + optional string name = 1; + + // Number of replicas + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString count = 2; +} + // ListGenerator include items info message ListGenerator { repeated k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSON elements = 1; optional ApplicationSetTemplate template = 2; + + optional string elementsYaml = 3; } message ManagedNamespaceMetadata { @@ -1108,6 +1183,18 @@ message OperationState { optional int64 retryCount = 8; } +message OptionalArray { + // Array is the value of an array type parameter. + // +optional + repeated string array = 1; +} + +message OptionalMap { + // Map is the value of a map type parameter. + // +optional + map map = 1; +} + // OrphanedResourceKey is a reference to a resource to be ignored from message OrphanedResourceKey { optional string group = 1; @@ -1140,6 +1227,33 @@ message OverrideIgnoreDiff { repeated string managedFieldsManagers = 3; } +message PluginConfigMapRef { + // Name of the ConfigMap + optional string name = 1; +} + +// PluginGenerator defines connection info specific to Plugin. +message PluginGenerator { + optional PluginConfigMapRef configMapRef = 1; + + optional PluginInput input = 2; + + // RequeueAfterSeconds determines how long the ApplicationSet controller will wait before reconciling the ApplicationSet again. + optional int64 requeueAfterSeconds = 3; + + optional ApplicationSetTemplate template = 4; + + // Values contains key/value pairs which are passed directly as parameters to the template. These values will not be + // sent as parameters to the plugin. + map values = 5; +} + +message PluginInput { + // Parameters contains the information to pass to the plugin. It is a map. The keys must be strings, and the + // values can be any type. + map parameters = 1; +} + // ProjectRole represents a role that has access to a project message ProjectRole { // Name is a name for this role @@ -1176,9 +1290,53 @@ message PullRequestGenerator { optional int64 requeueAfterSeconds = 6; optional ApplicationSetTemplate template = 7; + + optional PullRequestGeneratorBitbucket bitbucket = 8; + + // Additional provider to use and config for it. + optional PullRequestGeneratorAzureDevOps azuredevops = 9; } -// PullRequestGenerator defines connection info specific to BitbucketServer. +// PullRequestGeneratorAzureDevOps defines connection info specific to AzureDevOps. +message PullRequestGeneratorAzureDevOps { + // Azure DevOps org to scan. Required. 
+ optional string organization = 1; + + // Azure DevOps project name to scan. Required. + optional string project = 2; + + // Azure DevOps repo name to scan. Required. + optional string repo = 3; + + // The Azure DevOps API URL to talk to. If blank, use https://dev.azure.com/. + optional string api = 4; + + // Authentication token reference. + optional SecretRef tokenRef = 5; + + // Labels is used to filter the PRs that you want to target + repeated string labels = 6; +} + +// PullRequestGeneratorBitbucket defines connection info specific to Bitbucket. +message PullRequestGeneratorBitbucket { + // Workspace to scan. Required. + optional string owner = 1; + + // Repo name to scan. Required. + optional string repo = 2; + + // The Bitbucket REST API URL to talk to. If blank, uses https://api.bitbucket.org/2.0. + optional string api = 3; + + // Credentials for Basic auth + optional BasicAuthBitbucketServer basicAuth = 4; + + // Credentials for AppToken (Bearer auth) + optional BearerTokenBitbucketCloud bearerToken = 5; +} + +// PullRequestGeneratorBitbucketServer defines connection info specific to BitbucketServer. message PullRequestGeneratorBitbucketServer { // Project to scan. Required. optional string project = 1; @@ -1198,6 +1356,8 @@ message PullRequestGeneratorBitbucketServer { // pass for a pull request to be included. message PullRequestGeneratorFilter { optional string branchMatch = 1; + + optional string targetBranchMatch = 2; } // PullRequestGeneratorGitLab defines connection info specific to GitLab. @@ -1216,9 +1376,12 @@ message PullRequestGeneratorGitLab { // PullRequestState is an additional MRs filter to get only those with a certain state. Default: "" (all states) optional string pullRequestState = 5; + + // Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false + optional bool insecure = 6; } -// PullRequestGenerator defines connection info specific to Gitea. +// PullRequestGeneratorGitea defines connection info specific to Gitea. message PullRequestGeneratorGitea { // Gitea org or user to scan. Required. optional string owner = 1; @@ -1431,6 +1594,10 @@ message ResourceAction { repeated ResourceActionParam params = 2; optional bool disabled = 3; + + optional string iconClass = 4; + + optional string displayName = 5; } // TODO: describe this type @@ -1560,6 +1727,8 @@ message ResourceOverride { optional OverrideIgnoreDiff ignoreDifferences = 2; + optional OverrideIgnoreDiff ignoreResourceUpdates = 6; + repeated KnownTypeField knownTypeFields = 4; } @@ -1716,6 +1885,28 @@ message SCMProviderGenerator { optional int64 requeueAfterSeconds = 9; optional ApplicationSetTemplate template = 10; + + // Values contains key/value pairs which are passed directly as parameters to the template + map values = 11; + + optional SCMProviderGeneratorAWSCodeCommit awsCodeCommit = 12; +} + +// SCMProviderGeneratorAWSCodeCommit defines connection info specific to AWS CodeCommit. +message SCMProviderGeneratorAWSCodeCommit { + // TagFilters provides the tag filter(s) for repo discovery + repeated TagFilter tagFilters = 1; + + // Role provides the AWS IAM role to assume, for cross-account repo discovery + // if not provided, AppSet controller will use its pod/node identity to discover. + optional string role = 2; + + // Region provides the AWS region to discover repos. + // if not provided, AppSet controller will infer the current region from environment. + optional string region = 3; + + // Scan all branches instead of just the default branch. 
+ optional bool allBranches = 4; } // SCMProviderGeneratorAzureDevOps defines connection info specific to Azure DevOps. @@ -1838,6 +2029,9 @@ message SCMProviderGeneratorGitlab { // Scan all branches instead of just the default branch. optional bool allBranches = 5; + + // Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false + optional bool insecure = 6; } // Utility struct for a reference to a secret key. @@ -1917,6 +2111,9 @@ message SyncOperationResult { // Revisions holds the revision this sync operation was performed for respective indexed source in sources field repeated string revisions = 5; + + // ManagedNamespaceMetadata contains the current sync state of managed namespace metadata + optional ManagedNamespaceMetadata managedNamespaceMetadata = 6; } // SyncPolicy controls when a sync will be performed in response to updates in git @@ -1939,7 +2136,7 @@ message SyncPolicyAutomated { // Prune specifies whether to delete resources from the cluster that are not found in the sources anymore as part of automated sync (default: false) optional bool prune = 1; - // SelfHeal specifes whether to revert resources back to their desired state upon modification in the cluster (default: false) + // SelfHeal specifies whether to revert resources back to their desired state upon modification in the cluster (default: false) optional bool selfHeal = 2; // AllowEmpty allows apps have zero live resources (default: false) @@ -2036,3 +2233,9 @@ message TLSClientConfig { optional bytes caData = 5; } +message TagFilter { + optional string key = 1; + + optional string value = 2; +} + diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go index e9c422a9fd..3caf488c4b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go @@ -24,6 +24,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination": schema_pkg_apis_application_v1alpha1_ApplicationDestination(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationList": schema_pkg_apis_application_v1alpha1_ApplicationList(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationMatchExpression": schema_pkg_apis_application_v1alpha1_ApplicationMatchExpression(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields": schema_pkg_apis_application_v1alpha1_ApplicationPreservedFields(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSet": schema_pkg_apis_application_v1alpha1_ApplicationSet(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetApplicationStatus": schema_pkg_apis_application_v1alpha1_ApplicationSetApplicationStatus(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetCondition": schema_pkg_apis_application_v1alpha1_ApplicationSetCondition(ref), @@ -53,6 +54,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationWatchEvent": schema_pkg_apis_application_v1alpha1_ApplicationWatchEvent(ref), 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Backoff": schema_pkg_apis_application_v1alpha1_Backoff(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BasicAuthBitbucketServer": schema_pkg_apis_application_v1alpha1_BasicAuthBitbucketServer(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BearerTokenBitbucketCloud": schema_pkg_apis_application_v1alpha1_BearerTokenBitbucketCloud(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ChartDetails": schema_pkg_apis_application_v1alpha1_ChartDetails(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Cluster": schema_pkg_apis_application_v1alpha1_Cluster(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterCacheInfo": schema_pkg_apis_application_v1alpha1_ClusterCacheInfo(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterConfig": schema_pkg_apis_application_v1alpha1_ClusterConfig(ref), @@ -85,6 +88,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JsonnetVar": schema_pkg_apis_application_v1alpha1_JsonnetVar(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KnownTypeField": schema_pkg_apis_application_v1alpha1_KnownTypeField(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeOptions": schema_pkg_apis_application_v1alpha1_KustomizeOptions(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica": schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator": schema_pkg_apis_application_v1alpha1_ListGenerator(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ManagedNamespaceMetadata": schema_pkg_apis_application_v1alpha1_ManagedNamespaceMetadata(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MatrixGenerator": schema_pkg_apis_application_v1alpha1_MatrixGenerator(ref), @@ -94,11 +98,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Operation": schema_pkg_apis_application_v1alpha1_Operation(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationInitiator": schema_pkg_apis_application_v1alpha1_OperationInitiator(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationState": schema_pkg_apis_application_v1alpha1_OperationState(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OptionalArray": schema_pkg_apis_application_v1alpha1_OptionalArray(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OptionalMap": schema_pkg_apis_application_v1alpha1_OptionalMap(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OrphanedResourceKey": schema_pkg_apis_application_v1alpha1_OrphanedResourceKey(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OrphanedResourcesMonitorSettings": schema_pkg_apis_application_v1alpha1_OrphanedResourcesMonitorSettings(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OverrideIgnoreDiff": schema_pkg_apis_application_v1alpha1_OverrideIgnoreDiff(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginConfigMapRef": schema_pkg_apis_application_v1alpha1_PluginConfigMapRef(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator": 
schema_pkg_apis_application_v1alpha1_PluginGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginInput": schema_pkg_apis_application_v1alpha1_PluginInput(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ProjectRole": schema_pkg_apis_application_v1alpha1_ProjectRole(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator": schema_pkg_apis_application_v1alpha1_PullRequestGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorAzureDevOps": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorAzureDevOps(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucket": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucket(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucketServer": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucketServer(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorFilter": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorFilter(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitLab": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitLab(ref), @@ -127,6 +138,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionHistory": schema_pkg_apis_application_v1alpha1_RevisionHistory(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionMetadata": schema_pkg_apis_application_v1alpha1_RevisionMetadata(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator": schema_pkg_apis_application_v1alpha1_SCMProviderGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAWSCodeCommit": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAWSCodeCommit(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAzureDevOps": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAzureDevOps(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucket": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorBitbucket(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucketServer": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorBitbucketServer(ref), @@ -147,6 +159,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategyHook": schema_pkg_apis_application_v1alpha1_SyncStrategyHook(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncWindow": schema_pkg_apis_application_v1alpha1_SyncWindow(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TLSClientConfig": schema_pkg_apis_application_v1alpha1_TLSClientConfig(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TagFilter": schema_pkg_apis_application_v1alpha1_TagFilter(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.objectMeta": schema_pkg_apis_application_v1alpha1_objectMeta(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.rawResourceOverride": schema_pkg_apis_application_v1alpha1_rawResourceOverride(ref), } @@ -539,7 +552,7 @@ func schema_pkg_apis_application_v1alpha1_ApplicationCondition(ref 
common.Refere return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ApplicationCondition contains details about an application condition, which is usally an error or warning", + Description: "ApplicationCondition contains details about an application condition, which is usually an error or warning", Type: []string{"object"}, Properties: map[string]spec.Schema{ "type": { @@ -694,6 +707,32 @@ func schema_pkg_apis_application_v1alpha1_ApplicationMatchExpression(ref common. } } +func schema_pkg_apis_application_v1alpha1_ApplicationPreservedFields(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "annotations": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_ApplicationSet(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -903,11 +942,16 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetGenerator(ref common.Ref Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, + "plugin": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MatrixGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MergeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MatrixGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MergeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } @@ -1015,11 +1059,16 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetNestedGenerator(ref comm Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, + "plugin": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator", 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } @@ -1124,12 +1173,38 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetSpec(ref common.Referenc Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy"), }, }, + "preservedFields": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields"), + }, + }, + "goTemplateOptions": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "applyNestedSelectors": { + SchemaProps: spec.SchemaProps{ + Description: "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"generators", "template"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSyncPolicy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSyncPolicy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"}, } } @@ -1215,6 +1290,13 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetSyncPolicy(ref common.Re Format: "", }, }, + "applicationsSync": { + SchemaProps: spec.SchemaProps{ + Description: "ApplicationsSync represents the policy applied on the generated applications. 
Possible values are create-only, create-update, create-delete, sync", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -1355,11 +1437,22 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetTerminalGenerator(ref co Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator"), }, }, + "plugin": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator"), + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "Selector allows to post-filter all generator.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } @@ -1526,8 +1619,13 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSourceHelm(ref common.Refer }, }, "values": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-strategy": "replace", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "Values specifies Helm values to be passed to helm template, typically defined as a block", + Description: "Values specifies Helm values to be passed to helm template, typically defined as a block. ValuesObject takes precedence over Values, so use one or the other.", Type: []string{"string"}, Format: "", }, @@ -1574,11 +1672,17 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSourceHelm(ref common.Refer Format: "", }, }, + "valuesObject": { + SchemaProps: spec.SchemaProps{ + Description: "ValuesObject specifies Helm values to be passed to helm template, defined as a map. This takes precedence over Values.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmFileParameter", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmParameter"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmFileParameter", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmParameter", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -1729,9 +1833,39 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSourceKustomize(ref common. 
Format: "", }, }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace sets the namespace that Kustomize adds to all resources", + Type: []string{"string"}, + Format: "", + }, + }, + "commonAnnotationsEnvsubst": { + SchemaProps: spec.SchemaProps{ + Description: "CommonAnnotationsEnvsubst specifies whether to apply env variables substitution for annotation values", + Type: []string{"boolean"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is a list of Kustomize Replicas override specifications", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica"), + }, + }, + }, + }, + }, }, }, }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica"}, } } @@ -1801,37 +1935,6 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSourcePluginParameter(ref c Format: "", }, }, - "map": { - SchemaProps: spec.SchemaProps{ - Description: "Map is the value of a map type parameter.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "array": { - SchemaProps: spec.SchemaProps{ - Description: "Array is the value of an array type parameter.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, }, }, }, @@ -2047,6 +2150,13 @@ func schema_pkg_apis_application_v1alpha1_ApplicationStatus(ref common.Reference }, }, }, + "controllerNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "ControllerNamespace indicates the namespace in which the application controller is located", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -2249,6 +2359,69 @@ func schema_pkg_apis_application_v1alpha1_BasicAuthBitbucketServer(ref common.Re } } +func schema_pkg_apis_application_v1alpha1_BearerTokenBitbucketCloud(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BearerTokenBitbucketCloud defines the Bearer token for BitBucket AppToken auth.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "tokenRef": { + SchemaProps: spec.SchemaProps{ + Description: "Password (or personal access token) reference.", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef"), + }, + }, + }, + Required: []string{"tokenRef"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef"}, + } +} + +func schema_pkg_apis_application_v1alpha1_ChartDetails(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ChartDetails contains helm chart metadata for a specific version", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "description": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "home": { + SchemaProps: spec.SchemaProps{ + Description: "The URL of this projects home page, e.g. 
\"http://example.com\"", + Type: []string{"string"}, + Format: "", + }, + }, + "maintainers": { + SchemaProps: spec.SchemaProps{ + Description: "List of maintainer details, name and email, e.g. [\"John Doe \"]", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_Cluster(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2688,12 +2861,26 @@ func schema_pkg_apis_application_v1alpha1_ComparedTo(ref common.ReferenceCallbac }, }, }, + "ignoreDifferences": { + SchemaProps: spec.SchemaProps{ + Description: "IgnoreDifferences is a reference to the application's ignored differences used for comparison", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceIgnoreDifferences"), + }, + }, + }, + }, + }, }, Required: []string{"destination"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceIgnoreDifferences"}, } } @@ -3078,6 +3265,22 @@ func schema_pkg_apis_application_v1alpha1_GitGenerator(ref common.ReferenceCallb Format: "", }, }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "Values contains key/value pairs which are passed directly as parameters to the template", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"repoURL", "revision"}, }, @@ -3575,6 +3778,36 @@ func schema_pkg_apis_application_v1alpha1_KustomizeOptions(ref common.ReferenceC } } +func schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of Deployment or StatefulSet", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "count": { + SchemaProps: spec.SchemaProps{ + Description: "Number of replicas", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + }, + Required: []string{"name", "count"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + func schema_pkg_apis_application_v1alpha1_ListGenerator(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3601,6 +3834,12 @@ func schema_pkg_apis_application_v1alpha1_ListGenerator(ref common.ReferenceCall Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"), }, }, + "elementsYaml": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + 
Format: "", + }, + }, }, Required: []string{"elements"}, }, @@ -3947,20 +4186,75 @@ func schema_pkg_apis_application_v1alpha1_OperationState(ref common.ReferenceCal } } -func schema_pkg_apis_application_v1alpha1_OrphanedResourceKey(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_application_v1alpha1_OptionalArray(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "OrphanedResourceKey is a reference to a resource to be ignored from", - Type: []string{"object"}, + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { + "array": { + SchemaProps: spec.SchemaProps{ + Description: "Array is the value of an array type parameter.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_application_v1alpha1_OptionalMap(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "map": { + SchemaProps: spec.SchemaProps{ + Description: "Map is the value of a map type parameter.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_application_v1alpha1_OrphanedResourceKey(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OrphanedResourceKey is a reference to a resource to be ignored from", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, Format: "", @@ -4073,6 +4367,113 @@ func schema_pkg_apis_application_v1alpha1_OverrideIgnoreDiff(ref common.Referenc } } +func schema_pkg_apis_application_v1alpha1_PluginConfigMapRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the ConfigMap", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_application_v1alpha1_PluginGenerator(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PluginGenerator defines connection info specific to Plugin.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configMapRef": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginConfigMapRef"), + }, + }, + "input": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: 
ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginInput"), + }, + }, + "requeueAfterSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "RequeueAfterSeconds determines how long the ApplicationSet controller will wait before reconciling the ApplicationSet again.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"), + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "Values contains key/value pairs which are passed directly as parameters to the template. These values will not be sent as parameters to the plugin.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"configMapRef"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginConfigMapRef", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginInput"}, + } +} + +func schema_pkg_apis_application_v1alpha1_PluginInput(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "parameters": { + SchemaProps: spec.SchemaProps{ + Description: "Parameters contains the information to pass to the plugin. It is a map. The keys must be strings, and the values can be any type.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"}, + } +} + func schema_pkg_apis_application_v1alpha1_ProjectRole(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4203,11 +4604,141 @@ func schema_pkg_apis_application_v1alpha1_PullRequestGenerator(ref common.Refere Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"), }, }, + "bitbucket": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucket"), + }, + }, + "azuredevops": { + SchemaProps: spec.SchemaProps{ + Description: "Additional provider to use and config for it.", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorAzureDevOps"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucketServer", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorFilter", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitLab", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitea", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGithub"}, 
+ "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorAzureDevOps", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucket", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucketServer", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorFilter", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitLab", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitea", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGithub"}, + } +} + +func schema_pkg_apis_application_v1alpha1_PullRequestGeneratorAzureDevOps(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PullRequestGeneratorAzureDevOps defines connection info specific to AzureDevOps.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "organization": { + SchemaProps: spec.SchemaProps{ + Description: "Azure DevOps org to scan. Required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "project": { + SchemaProps: spec.SchemaProps{ + Description: "Azure DevOps project name to scan. Required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "repo": { + SchemaProps: spec.SchemaProps{ + Description: "Azure DevOps repo name to scan. Required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "api": { + SchemaProps: spec.SchemaProps{ + Description: "The Azure DevOps API URL to talk to. If blank, use https://dev.azure.com/.", + Type: []string{"string"}, + Format: "", + }, + }, + "tokenRef": { + SchemaProps: spec.SchemaProps{ + Description: "Authentication token reference.", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef"), + }, + }, + "labels": { + SchemaProps: spec.SchemaProps{ + Description: "Labels is used to filter the PRs that you want to target", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"organization", "project", "repo"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef"}, + } +} + +func schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucket(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PullRequestGeneratorBitbucket defines connection info specific to Bitbucket.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "owner": { + SchemaProps: spec.SchemaProps{ + Description: "Workspace to scan. Required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "repo": { + SchemaProps: spec.SchemaProps{ + Description: "Repo name to scan. Required.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "api": { + SchemaProps: spec.SchemaProps{ + Description: "The Bitbucket REST API URL to talk to. 
If blank, uses https://api.bitbucket.org/2.0.", + Type: []string{"string"}, + Format: "", + }, + }, + "basicAuth": { + SchemaProps: spec.SchemaProps{ + Description: "Credentials for Basic auth", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BasicAuthBitbucketServer"), + }, + }, + "bearerToken": { + SchemaProps: spec.SchemaProps{ + Description: "Credentials for AppToken (Bearer auth)", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BearerTokenBitbucketCloud"), + }, + }, + }, + Required: []string{"owner", "repo"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BasicAuthBitbucketServer", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BearerTokenBitbucketCloud"}, } } @@ -4215,7 +4746,7 @@ func schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucketServer(re return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PullRequestGenerator defines connection info specific to BitbucketServer.", + Description: "PullRequestGeneratorBitbucketServer defines connection info specific to BitbucketServer.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "project": { @@ -4270,6 +4801,12 @@ func schema_pkg_apis_application_v1alpha1_PullRequestGeneratorFilter(ref common. Format: "", }, }, + "targetBranchMatch": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -4326,6 +4863,13 @@ func schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitLab(ref common. Format: "", }, }, + "insecure": { + SchemaProps: spec.SchemaProps{ + Description: "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"project"}, }, @@ -4339,7 +4883,7 @@ func schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitea(ref common.R return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PullRequestGenerator defines connection info specific to Gitea.", + Description: "PullRequestGeneratorGitea defines connection info specific to Gitea.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "owner": { @@ -4970,6 +5514,18 @@ func schema_pkg_apis_application_v1alpha1_ResourceAction(ref common.ReferenceCal Format: "", }, }, + "iconClass": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -5478,6 +6034,12 @@ func schema_pkg_apis_application_v1alpha1_ResourceOverride(ref common.ReferenceC Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OverrideIgnoreDiff"), }, }, + "IgnoreResourceUpdates": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OverrideIgnoreDiff"), + }, + }, "KnownTypeFields": { SchemaProps: spec.SchemaProps{ Type: []string{"array"}, @@ -5492,7 +6054,7 @@ func schema_pkg_apis_application_v1alpha1_ResourceOverride(ref common.ReferenceC }, }, }, - Required: []string{"HealthLua", "UseOpenLibs", "Actions", "IgnoreDifferences", "KnownTypeFields"}, + Required: []string{"HealthLua", "UseOpenLibs", "Actions", "IgnoreDifferences", "IgnoreResourceUpdates", "KnownTypeFields"}, }, }, Dependencies: []string{ @@ -5951,11 +6513,81 @@ func 
schema_pkg_apis_application_v1alpha1_SCMProviderGenerator(ref common.Refere Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"), }, }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "Values contains key/value pairs which are passed directly as parameters to the template", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "awsCodeCommit": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAWSCodeCommit"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAzureDevOps", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucket", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucketServer", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorFilter", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitea", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGithub", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitlab"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAWSCodeCommit", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAzureDevOps", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucket", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucketServer", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorFilter", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitea", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGithub", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitlab"}, + } +} + +func schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAWSCodeCommit(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SCMProviderGeneratorAWSCodeCommit defines connection info specific to AWS CodeCommit.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "tagFilters": { + SchemaProps: spec.SchemaProps{ + Description: "TagFilters provides the tag filter(s) for repo discovery", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TagFilter"), + }, + }, + }, + }, + }, + "role": { + SchemaProps: spec.SchemaProps{ + Description: "Role provides the AWS IAM role to assume, for cross-account repo discovery if not provided, AppSet controller will use its pod/node identity to discover.", + Type: []string{"string"}, + Format: "", + }, + }, + "region": { + SchemaProps: spec.SchemaProps{ + Description: "Region provides the AWS region to discover repos. 
if not provided, AppSet controller will infer the current region from environment.", + Type: []string{"string"}, + Format: "", + }, + }, + "allBranches": { + SchemaProps: spec.SchemaProps{ + Description: "Scan all branches instead of just the default branch.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TagFilter"}, } } @@ -6310,6 +6942,13 @@ func schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitlab(ref common. Format: "", }, }, + "insecure": { + SchemaProps: spec.SchemaProps{ + Description: "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"group"}, }, @@ -6594,12 +7233,18 @@ func schema_pkg_apis_application_v1alpha1_SyncOperationResult(ref common.Referen }, }, }, + "managedNamespaceMetadata": { + SchemaProps: spec.SchemaProps{ + Description: "ManagedNamespaceMetadata contains the current sync state of managed namespace metadata", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ManagedNamespaceMetadata"), + }, + }, }, Required: []string{"revision"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceResult"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ManagedNamespaceMetadata", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceResult"}, } } @@ -6667,7 +7312,7 @@ func schema_pkg_apis_application_v1alpha1_SyncPolicyAutomated(ref common.Referen }, "selfHeal": { SchemaProps: spec.SchemaProps{ - Description: "SelfHeal specifes whether to revert resources back to their desired state upon modification in the cluster (default: false)", + Description: "SelfHeal specifies whether to revert resources back to their desired state upon modification in the cluster (default: false)", Type: []string{"boolean"}, Format: "", }, @@ -6948,6 +7593,32 @@ func schema_pkg_apis_application_v1alpha1_TLSClientConfig(ref common.ReferenceCa } } +func schema_pkg_apis_application_v1alpha1_TagFilter(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_objectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6998,6 +7669,12 @@ func schema_pkg_apis_application_v1alpha1_rawResourceOverride(ref common.Referen Format: "", }, }, + "ignoreResourceUpdates": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, "knownTypeFields": { SchemaProps: spec.SchemaProps{ Type: []string{"array"}, diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go index 67fb263510..2eb5166c7d 100644 --- 
a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go @@ -18,7 +18,6 @@ import ( "github.com/argoproj/gitops-engine/pkg/health" synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" - "github.com/ghodss/yaml" "github.com/robfig/cron/v3" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" @@ -26,13 +25,19 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" + "sigs.k8s.io/yaml" + "github.com/argoproj/argo-cd/v2/util/env" + + "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/util/collections" "github.com/argoproj/argo-cd/v2/util/helm" "github.com/argoproj/argo-cd/v2/util/security" @@ -66,7 +71,7 @@ type ApplicationSpec struct { // SyncPolicy controls when and how a sync will be performed SyncPolicy *SyncPolicy `json:"syncPolicy,omitempty" protobuf:"bytes,4,name=syncPolicy"` // IgnoreDifferences is a list of resources and their fields which should be ignored during comparison - IgnoreDifferences []ResourceIgnoreDifferences `json:"ignoreDifferences,omitempty" protobuf:"bytes,5,name=ignoreDifferences"` + IgnoreDifferences IgnoreDifferences `json:"ignoreDifferences,omitempty" protobuf:"bytes,5,name=ignoreDifferences"` // Info contains a list of information (URLs, email addresses, and plain text) that relates to the application Info []Info `json:"info,omitempty" protobuf:"bytes,6,name=info"` // RevisionHistoryLimit limits the number of items kept in the application's revision history, which is used for informational purposes as well as for rollbacks to previous versions. @@ -80,6 +85,12 @@ type ApplicationSpec struct { Sources ApplicationSources `json:"sources,omitempty" protobuf:"bytes,8,opt,name=sources"` } +type IgnoreDifferences []ResourceIgnoreDifferences + +func (id IgnoreDifferences) Equals(other IgnoreDifferences) bool { + return reflect.DeepEqual(id, other) +} + type TrackingMethod string // ResourceIgnoreDifferences contains resource filter and list of json paths which should be ignored during comparison with live state. @@ -187,7 +198,7 @@ func (s ApplicationSources) Equals(other ApplicationSources) bool { return false } for i := range s { - if !s[i].Equals(other[i]) { + if !s[i].Equals(&other[i]) { return false } } @@ -296,8 +307,9 @@ type ApplicationSourceHelm struct { Parameters []HelmParameter `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"` // ReleaseName is the Helm release name to use. If omitted it will use the application name ReleaseName string `json:"releaseName,omitempty" protobuf:"bytes,3,opt,name=releaseName"` - // Values specifies Helm values to be passed to helm template, typically defined as a block - Values string `json:"values,omitempty" protobuf:"bytes,4,opt,name=values"` + // Values specifies Helm values to be passed to helm template, typically defined as a block. ValuesObject takes precedence over Values, so use one or the other. 
+ // +patchStrategy=replace + Values string `json:"values,omitempty" patchStrategy:"replace" protobuf:"bytes,4,opt,name=values"` // FileParameters are file parameters to the helm template FileParameters []HelmFileParameter `json:"fileParameters,omitempty" protobuf:"bytes,5,opt,name=fileParameters"` // Version is the Helm version to use for templating ("3") @@ -308,6 +320,9 @@ type ApplicationSourceHelm struct { IgnoreMissingValueFiles bool `json:"ignoreMissingValueFiles,omitempty" protobuf:"bytes,8,opt,name=ignoreMissingValueFiles"` // SkipCrds skips custom resource definition installation step (Helm's --skip-crds) SkipCrds bool `json:"skipCrds,omitempty" protobuf:"bytes,9,opt,name=skipCrds"` + // ValuesObject specifies Helm values to be passed to helm template, defined as a map. This takes precedence over Values. + // +kubebuilder:pruning:PreserveUnknownFields + ValuesObject *runtime.RawExtension `json:"valuesObject,omitempty" protobuf:"bytes,10,opt,name=valuesObject"` } // HelmParameter is a parameter that's passed to helm template during manifest generation @@ -389,7 +404,7 @@ func (in *ApplicationSourceHelm) AddFileParameter(p HelmFileParameter) { // IsZero Returns true if the Helm options in an application source are considered zero func (h *ApplicationSourceHelm) IsZero() bool { - return h == nil || (h.Version == "") && (h.ReleaseName == "") && len(h.ValueFiles) == 0 && len(h.Parameters) == 0 && len(h.FileParameters) == 0 && h.Values == "" && !h.PassCredentials && !h.IgnoreMissingValueFiles && !h.SkipCrds + return h == nil || (h.Version == "") && (h.ReleaseName == "") && len(h.ValueFiles) == 0 && len(h.Parameters) == 0 && len(h.FileParameters) == 0 && h.ValuesIsEmpty() && !h.PassCredentials && !h.IgnoreMissingValueFiles && !h.SkipCrds } // KustomizeImage represents a Kustomize image definition in the format [old_image_name=]: @@ -444,6 +459,54 @@ type ApplicationSourceKustomize struct { ForceCommonLabels bool `json:"forceCommonLabels,omitempty" protobuf:"bytes,7,opt,name=forceCommonLabels"` // ForceCommonAnnotations specifies whether to force applying common annotations to resources for Kustomize apps ForceCommonAnnotations bool `json:"forceCommonAnnotations,omitempty" protobuf:"bytes,8,opt,name=forceCommonAnnotations"` + // Namespace sets the namespace that Kustomize adds to all resources + Namespace string `json:"namespace,omitempty" protobuf:"bytes,9,opt,name=namespace"` + // CommonAnnotationsEnvsubst specifies whether to apply env variables substitution for annotation values + CommonAnnotationsEnvsubst bool `json:"commonAnnotationsEnvsubst,omitempty" protobuf:"bytes,10,opt,name=commonAnnotationsEnvsubst"` + // Replicas is a list of Kustomize Replicas override specifications + Replicas KustomizeReplicas `json:"replicas,omitempty" protobuf:"bytes,11,opt,name=replicas"` +} + +type KustomizeReplica struct { + // Name of Deployment or StatefulSet + Name string `json:"name" protobuf:"bytes,1,name=name"` + // Number of replicas + Count intstr.IntOrString `json:"count" protobuf:"bytes,2,name=count"` +} + +type KustomizeReplicas []KustomizeReplica + +// GetIntCount returns Count converted to int. +// If parsing error occurs, returns 0 and error. +func (kr KustomizeReplica) GetIntCount() (int, error) { + if kr.Count.Type == intstr.String { + if count, err := strconv.Atoi(kr.Count.StrVal); err != nil { + return 0, fmt.Errorf("expected integer value for count. 
Received: %s", kr.Count.StrVal) + } else { + return count, nil + } + } else { + return kr.Count.IntValue(), nil + } +} + +// NewKustomizeReplica parses a string in format name=count into a KustomizeReplica object and returns it +func NewKustomizeReplica(text string) (*KustomizeReplica, error) { + parts := strings.SplitN(text, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("expected parameter of the form: name=count. Received: %s", text) + } + + kr := &KustomizeReplica{ + Name: parts[0], + Count: intstr.Parse(parts[1]), + } + + if _, err := kr.GetIntCount(); err != nil { + return nil, err + } + + return kr, nil } // AllowsConcurrentProcessing returns true if multiple processes can run Kustomize builds on the same source at the same time @@ -452,6 +515,7 @@ func (k *ApplicationSourceKustomize) AllowsConcurrentProcessing() bool { len(k.CommonLabels) == 0 && len(k.CommonAnnotations) == 0 && k.NamePrefix == "" && + k.Namespace == "" && k.NameSuffix == "" } @@ -461,7 +525,9 @@ func (k *ApplicationSourceKustomize) IsZero() bool { k.NamePrefix == "" && k.NameSuffix == "" && k.Version == "" && + k.Namespace == "" && len(k.Images) == 0 && + len(k.Replicas) == 0 && len(k.CommonLabels) == 0 && len(k.CommonAnnotations) == 0 } @@ -476,6 +542,26 @@ func (k *ApplicationSourceKustomize) MergeImage(image KustomizeImage) { } } +// MergeReplicas merges a new Kustomize replica identifier in to a list of replicas +func (k *ApplicationSourceKustomize) MergeReplica(replica KustomizeReplica) { + i := k.Replicas.FindByName(replica.Name) + if i >= 0 { + k.Replicas[i] = replica + } else { + k.Replicas = append(k.Replicas, replica) + } +} + +// Find returns a positive integer representing the index in the list of replicas +func (rs KustomizeReplicas) FindByName(name string) int { + for i, r := range rs { + if r.Name == name { + return i + } + } + return -1 +} + // JsonnetVar represents a variable to be passed to jsonnet during manifest generation type JsonnetVar struct { Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -525,19 +611,155 @@ func (d *ApplicationSourceDirectory) IsZero() bool { return d == nil || !d.Recurse && d.Jsonnet.IsZero() } +type OptionalMap struct { + // Map is the value of a map type parameter. + // +optional + Map map[string]string `json:"map" protobuf:"bytes,1,rep,name=map"` + // We need the explicit +optional so that kube-builder generates the CRD without marking this as required. +} + +// Equals returns true if the two OptionalMap objects are equal. We can't use reflect.DeepEqual because it will return +// false if one of the maps is nil and the other is an empty map. This is because the JSON unmarshaller will set the +// map to nil if it is empty, but the protobuf unmarshaller will set it to an empty map. +func (o *OptionalMap) Equals(other *OptionalMap) bool { + if o == nil && other == nil { + return true + } + if o == nil || other == nil { + return false + } + if len(o.Map) != len(other.Map) { + return false + } + if o.Map == nil && other.Map == nil { + return true + } + // The next two blocks are critical. Depending on whether the struct was populated from JSON or protobufs, the map + // field will be either nil or an empty map. They mean the same thing: the map is empty. + if o.Map == nil && len(other.Map) == 0 { + return true + } + if other.Map == nil && len(o.Map) == 0 { + return true + } + return reflect.DeepEqual(o.Map, other.Map) +} + +type OptionalArray struct { + // Array is the value of an array type parameter. 
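// Illustrative sketch, not part of the vendored sources: a minimal program that
// exercises the NewKustomizeReplica/GetIntCount helpers introduced in this hunk,
// assuming the vendored import path shown in the diff headers above.
package main

import (
	"fmt"

	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	// "web=3" follows the name=count form parsed by NewKustomizeReplica.
	kr, err := argoappv1.NewKustomizeReplica("web=3")
	if err != nil {
		panic(err)
	}
	// GetIntCount normalises the IntOrString count into a plain int.
	count, err := kr.GetIntCount()
	if err != nil {
		panic(err)
	}
	fmt.Printf("override %s to %d replicas\n", kr.Name, count)
}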
+ // +optional + Array []string `json:"array" protobuf:"bytes,1,rep,name=array"` + // We need the explicit +optional so that kube-builder generates the CRD without marking this as required. +} + +// Equals returns true if the two OptionalArray objects are equal. We can't use reflect.DeepEqual because it will return +// false if one of the arrays is nil and the other is an empty array. This is because the JSON unmarshaller will set the +// array to nil if it is empty, but the protobuf unmarshaller will set it to an empty array. +func (o *OptionalArray) Equals(other *OptionalArray) bool { + if o == nil && other == nil { + return true + } + if o == nil || other == nil { + return false + } + if len(o.Array) != len(other.Array) { + return false + } + if o.Array == nil && other.Array == nil { + return true + } + // The next two blocks are critical. Depending on whether the struct was populated from JSON or protobufs, the array + // field will be either nil or an empty array. They mean the same thing: the array is empty. + if o.Array == nil && len(other.Array) == 0 { + return true + } + if other.Array == nil && len(o.Array) == 0 { + return true + } + return reflect.DeepEqual(o.Array, other.Array) +} + type ApplicationSourcePluginParameter struct { + // We use pointers to structs because go-to-protobuf represents pointers to arrays/maps as repeated fields. + // These repeated fields have no way to represent "present but empty." So we would have no way to distinguish + // {name: parameters, array: []} from {name: parameter} + // By wrapping the array/map in a struct, we can use a pointer to the struct to represent "present but empty." + // Name is the name identifying a parameter. Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // String_ is the value of a string type parameter. String_ *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"` // Map is the value of a map type parameter. - Map map[string]string `json:"map,omitempty" protobuf:"bytes,3,rep,name=map"` + *OptionalMap `json:",omitempty" protobuf:"bytes,3,rep,name=map"` // Array is the value of an array type parameter. - Array []string `json:"array,omitempty" protobuf:"bytes,4,rep,name=array"` + *OptionalArray `json:",omitempty" protobuf:"bytes,4,rep,name=array"` +} + +func (p ApplicationSourcePluginParameter) Equals(other ApplicationSourcePluginParameter) bool { + if p.Name != other.Name { + return false + } + if !reflect.DeepEqual(p.String_, other.String_) { + return false + } + return p.OptionalMap.Equals(other.OptionalMap) && p.OptionalArray.Equals(other.OptionalArray) +} + +// MarshalJSON is a custom JSON marshaller for ApplicationSourcePluginParameter. We need this custom marshaler because, +// when ApplicationSourcePluginParameter is unmarshaled, either from JSON or protobufs, the fields inside OptionalMap and +// OptionalArray are not set. The default JSON marshaler marshals these as "null." But really what we want to represent +// is an empty map or array. +// +// There are efforts to change things upstream, but nothing has been merged yet. See https://github.com/golang/go/issues/37711 +func (p ApplicationSourcePluginParameter) MarshalJSON() ([]byte, error) { + out := map[string]interface{}{} + out["name"] = p.Name + if p.String_ != nil { + out["string"] = p.String_ + } + if p.OptionalMap != nil { + if p.OptionalMap.Map == nil { + // Nil is not the same as a nil map. Nil means the field was not set, while a nil map means the field was set to an empty map. 
+ // Either way, we want to marshal it as "{}". + out["map"] = map[string]string{} + } else { + out["map"] = p.OptionalMap.Map + } + } + if p.OptionalArray != nil { + if p.OptionalArray.Array == nil { + // Nil is not the same as a nil array. Nil means the field was not set, while a nil array means the field was set to an empty array. + // Either way, we want to marshal it as "[]". + out["array"] = []string{} + } else { + out["array"] = p.OptionalArray.Array + } + } + bytes, err := json.Marshal(out) + if err != nil { + return nil, err + } + return bytes, nil } type ApplicationSourcePluginParameters []ApplicationSourcePluginParameter +func (p ApplicationSourcePluginParameters) Equals(other ApplicationSourcePluginParameters) bool { + if len(p) != len(other) { + return false + } + for i := range p { + if !p[i].Equals(other[i]) { + return false + } + } + return true +} + +func (p ApplicationSourcePluginParameters) IsZero() bool { + return len(p) == 0 +} + // Environ builds a list of environment variables to represent parameters sent to a plugin from the Application // manifest. Parameters are represented as one large stringified JSON array (under `ARGOCD_APP_PARAMETERS`). They're // also represented as individual environment variables, each variable's key being an escaped version of the parameter's @@ -556,13 +778,13 @@ func (p ApplicationSourcePluginParameters) Environ() ([]string, error) { if param.String_ != nil { env = append(env, fmt.Sprintf("%s=%s", envBaseName, *param.String_)) } - if param.Map != nil { - for key, value := range param.Map { + if param.OptionalMap != nil { + for key, value := range param.OptionalMap.Map { env = append(env, fmt.Sprintf("%s_%s=%s", envBaseName, escaped(key), value)) } } - if param.Array != nil { - for i, value := range param.Array { + if param.OptionalArray != nil { + for i, value := range param.OptionalArray.Array { env = append(env, fmt.Sprintf("%s_%d=%s", envBaseName, i, value)) } } @@ -584,9 +806,28 @@ type ApplicationSourcePlugin struct { Parameters ApplicationSourcePluginParameters `json:"parameters,omitempty" protobuf:"bytes,3,opt,name=parameters"` } +func (c *ApplicationSourcePlugin) Equals(other *ApplicationSourcePlugin) bool { + if c == nil && other == nil { + return true + } + if c == nil || other == nil { + return false + } + if !c.Parameters.Equals(other.Parameters) { + return false + } + // DeepEqual works fine for fields besides Parameters. Since we already know that Parameters are equal, we can + // set them to nil and then do a DeepEqual. + leftCopy := c.DeepCopy() + rightCopy := other.DeepCopy() + leftCopy.Parameters = nil + rightCopy.Parameters = nil + return reflect.DeepEqual(leftCopy, rightCopy) +} + // IsZero returns true if the ApplicationSourcePlugin is considered empty func (c *ApplicationSourcePlugin) IsZero() bool { - return c == nil || c.Name == "" && c.Env.IsZero() + return c == nil || c.Name == "" && c.Env.IsZero() && c.Parameters.IsZero() } // AddEnvEntry merges an EnvEntry into a list of entries. 
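// Illustrative sketch, not part of the vendored sources: shows why OptionalMap
// defines its own Equals instead of relying on reflect.DeepEqual, assuming the
// vendored import path shown in the diff headers above.
package main

import (
	"fmt"
	"reflect"

	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	// A value decoded from JSON leaves an empty map as nil, while one decoded
	// from protobuf yields a non-nil, zero-length map; both mean "empty".
	fromJSON := &argoappv1.OptionalMap{Map: nil}
	fromProto := &argoappv1.OptionalMap{Map: map[string]string{}}

	fmt.Println(fromJSON.Equals(fromProto))                     // true
	fmt.Println(reflect.DeepEqual(fromJSON.Map, fromProto.Map)) // false: nil vs empty map
}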
If an entry with the same name already exists, @@ -665,6 +906,8 @@ type ApplicationStatus struct { ResourceHealthSource ResourceHealthLocation `json:"resourceHealthSource,omitempty" protobuf:"bytes,11,opt,name=resourceHealthSource"` // SourceTypes specifies the type of the sources included in the application SourceTypes []ApplicationSourceType `json:"sourceTypes,omitempty" protobuf:"bytes,12,opt,name=sourceTypes"` + // ControllerNamespace indicates the namespace in which the application controller is located + ControllerNamespace string `json:"controllerNamespace,omitempty" protobuf:"bytes,13,opt,name=controllerNamespace"` } // JWTTokens represents a list of JWT tokens @@ -706,6 +949,8 @@ type SyncOperationResource struct { Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` Name string `json:"name" protobuf:"bytes,3,opt,name=name"` Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` + // nolint:govet + Exclude bool `json:"-"` } // RevisionHistories is a array of history, oldest first and newest last @@ -733,6 +978,17 @@ func (r SyncOperationResource) HasIdentity(name string, namespace string, gvk sc return false } +// Compare determines whether an app resource matches the resource filter during sync or wait. +func (r SyncOperationResource) Compare(name string, namespace string, gvk schema.GroupVersionKind) bool { + if (r.Group == "*" || gvk.Group == r.Group) && + (r.Kind == "*" || gvk.Kind == r.Kind) && + (r.Name == "*" || name == r.Name) && + (r.Namespace == "*" || r.Namespace == "" || namespace == r.Namespace) { + return true + } + return false +} + // SyncOperation contains details about a sync operation. type SyncOperation struct { // Revision is the revision (Git) or chart version (Helm) which to sync the application to @@ -912,7 +1168,7 @@ type Backoff struct { type SyncPolicyAutomated struct { // Prune specifies whether to delete resources from the cluster that are not found in the sources anymore as part of automated sync (default: false) Prune bool `json:"prune,omitempty" protobuf:"bytes,1,opt,name=prune"` - // SelfHeal specifes whether to revert resources back to their desired state upon modification in the cluster (default: false) + // SelfHeal specifies whether to revert resources back to their desired state upon modification in the cluster (default: false) SelfHeal bool `json:"selfHeal,omitempty" protobuf:"bytes,2,opt,name=selfHeal"` // AllowEmpty allows apps have zero live resources (default: false) AllowEmpty bool `json:"allowEmpty,omitempty" protobuf:"bytes,3,opt,name=allowEmpty"` @@ -972,6 +1228,15 @@ type RevisionMetadata struct { SignatureInfo string `json:"signatureInfo,omitempty" protobuf:"bytes,5,opt,name=signatureInfo"` } +// ChartDetails contains helm chart metadata for a specific version +type ChartDetails struct { + Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` + // The URL of this projects home page, e.g. "http://example.com" + Home string `json:"home,omitempty" protobuf:"bytes,2,opt,name=home"` + // List of maintainer details, name and email, e.g. 
["John Doe "] + Maintainers []string `json:"maintainers,omitempty" protobuf:"bytes,3,opt,name=maintainers"` +} + // SyncOperationResult represent result of sync operation type SyncOperationResult struct { // Resources contains a list of sync result items for each individual resource in a sync operation @@ -984,6 +1249,8 @@ type SyncOperationResult struct { Sources ApplicationSources `json:"sources,omitempty" protobuf:"bytes,4,opt,name=sources"` // Revisions holds the revision this sync operation was performed for respective indexed source in sources field Revisions []string `json:"revisions,omitempty" protobuf:"bytes,5,opt,name=revisions"` + // ManagedNamespaceMetadata contains the current sync state of managed namespace metadata + ManagedNamespaceMetadata *ManagedNamespaceMetadata `json:"managedNamespaceMetadata,omitempty" protobuf:"bytes,6,opt,name=managedNamespaceMetadata"` } // ResourceResult holds the operation result details of a specific resource @@ -1128,7 +1395,7 @@ const ( ApplicationConditionOrphanedResourceWarning = "OrphanedResourceWarning" ) -// ApplicationCondition contains details about an application condition, which is usally an error or warning +// ApplicationCondition contains details about an application condition, which is usually an error or warning type ApplicationCondition struct { // Type is an application condition type Type ApplicationConditionType `json:"type" protobuf:"bytes,1,opt,name=type"` @@ -1146,6 +1413,8 @@ type ComparedTo struct { Destination ApplicationDestination `json:"destination" protobuf:"bytes,2,opt,name=destination"` // Sources is a reference to the application's multiple sources used for comparison Sources ApplicationSources `json:"sources,omitempty" protobuf:"bytes,3,opt,name=sources"` + // IgnoreDifferences is a reference to the application's ignored differences used for comparison + IgnoreDifferences IgnoreDifferences `json:"ignoreDifferences,omitempty" protobuf:"bytes,4,opt,name=ignoreDifferences"` } // SyncStatus contains information about the currently observed live and desired states of an application @@ -1248,7 +1517,7 @@ func (t *ApplicationTree) FindNode(group string, kind string, namespace string, } // TODO: Document purpose of this method -func (t *ApplicationTree) GetSummary() ApplicationSummary { +func (t *ApplicationTree) GetSummary(app *Application) ApplicationSummary { urlsSet := make(map[string]bool) imagesSet := make(map[string]bool) for _, node := range t.Nodes { @@ -1261,6 +1530,12 @@ func (t *ApplicationTree) GetSummary() ApplicationSummary { imagesSet[image] = true } } + // also add Application's own links + for k, v := range app.GetAnnotations() { + if strings.HasPrefix(k, common.AnnotationKeyLinkPrefix) { + urlsSet[v] = true + } + } urls := make([]string, 0) for url := range urlsSet { urls = append(urls, url) @@ -1577,9 +1852,9 @@ type KnownTypeField struct { // OverrideIgnoreDiff contains configurations about how fields should be ignored during diffs between // the desired state and live state type OverrideIgnoreDiff struct { - //JSONPointers is a JSON path list following the format defined in RFC4627 (https://datatracker.ietf.org/doc/html/rfc6902#section-3) + // JSONPointers is a JSON path list following the format defined in RFC4627 (https://datatracker.ietf.org/doc/html/rfc6902#section-3) JSONPointers []string `json:"jsonPointers" protobuf:"bytes,1,rep,name=jSONPointers"` - //JQPathExpressions is a JQ path list that will be evaludated during the diff process + // JQPathExpressions is a JQ path list that will be 
evaludated during the diff process JQPathExpressions []string `json:"jqPathExpressions" protobuf:"bytes,2,opt,name=jqPathExpressions"` // ManagedFieldsManagers is a list of trusted managers. Fields mutated by those managers will take precedence over the // desired state defined in the SCM and won't be displayed in diffs @@ -1587,21 +1862,23 @@ type OverrideIgnoreDiff struct { } type rawResourceOverride struct { - HealthLua string `json:"health.lua,omitempty"` - UseOpenLibs bool `json:"health.lua.useOpenLibs,omitempty"` - Actions string `json:"actions,omitempty"` - IgnoreDifferences string `json:"ignoreDifferences,omitempty"` - KnownTypeFields []KnownTypeField `json:"knownTypeFields,omitempty"` + HealthLua string `json:"health.lua,omitempty"` + UseOpenLibs bool `json:"health.lua.useOpenLibs,omitempty"` + Actions string `json:"actions,omitempty"` + IgnoreDifferences string `json:"ignoreDifferences,omitempty"` + IgnoreResourceUpdates string `json:"ignoreResourceUpdates,omitempty"` + KnownTypeFields []KnownTypeField `json:"knownTypeFields,omitempty"` } // ResourceOverride holds configuration to customize resource diffing and health assessment // TODO: describe the members of this type type ResourceOverride struct { - HealthLua string `protobuf:"bytes,1,opt,name=healthLua"` - UseOpenLibs bool `protobuf:"bytes,5,opt,name=useOpenLibs"` - Actions string `protobuf:"bytes,3,opt,name=actions"` - IgnoreDifferences OverrideIgnoreDiff `protobuf:"bytes,2,opt,name=ignoreDifferences"` - KnownTypeFields []KnownTypeField `protobuf:"bytes,4,opt,name=knownTypeFields"` + HealthLua string `protobuf:"bytes,1,opt,name=healthLua"` + UseOpenLibs bool `protobuf:"bytes,5,opt,name=useOpenLibs"` + Actions string `protobuf:"bytes,3,opt,name=actions"` + IgnoreDifferences OverrideIgnoreDiff `protobuf:"bytes,2,opt,name=ignoreDifferences"` + IgnoreResourceUpdates OverrideIgnoreDiff `protobuf:"bytes,6,opt,name=ignoreResourceUpdates"` + KnownTypeFields []KnownTypeField `protobuf:"bytes,4,opt,name=knownTypeFields"` } // TODO: describe this method @@ -1614,7 +1891,15 @@ func (s *ResourceOverride) UnmarshalJSON(data []byte) error { s.HealthLua = raw.HealthLua s.UseOpenLibs = raw.UseOpenLibs s.Actions = raw.Actions - return yaml.Unmarshal([]byte(raw.IgnoreDifferences), &s.IgnoreDifferences) + err := yaml.Unmarshal([]byte(raw.IgnoreDifferences), &s.IgnoreDifferences) + if err != nil { + return err + } + err = yaml.Unmarshal([]byte(raw.IgnoreResourceUpdates), &s.IgnoreResourceUpdates) + if err != nil { + return err + } + return nil } // TODO: describe this method @@ -1623,7 +1908,11 @@ func (s ResourceOverride) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - raw := &rawResourceOverride{s.HealthLua, s.UseOpenLibs, s.Actions, string(ignoreDifferencesData), s.KnownTypeFields} + ignoreResourceUpdatesData, err := yaml.Marshal(s.IgnoreResourceUpdates) + if err != nil { + return nil, err + } + raw := &rawResourceOverride{s.HealthLua, s.UseOpenLibs, s.Actions, string(ignoreDifferencesData), string(ignoreResourceUpdatesData), s.KnownTypeFields} return json.Marshal(raw) } @@ -1654,9 +1943,11 @@ type ResourceActionDefinition struct { // TODO: describe this type // TODO: describe members of this type type ResourceAction struct { - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - Params []ResourceActionParam `json:"params,omitempty" protobuf:"bytes,2,rep,name=params"` - Disabled bool `json:"disabled,omitempty" protobuf:"varint,3,opt,name=disabled"` + Name string `json:"name,omitempty" 
protobuf:"bytes,1,opt,name=name"` + Params []ResourceActionParam `json:"params,omitempty" protobuf:"bytes,2,rep,name=params"` + Disabled bool `json:"disabled,omitempty" protobuf:"varint,3,opt,name=disabled"` + IconClass string `json:"iconClass,omitempty" protobuf:"bytes,4,opt,name=iconClass"` + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,5,opt,name=displayName"` } // TODO: describe this type @@ -1853,7 +2144,7 @@ type SyncWindow struct { Clusters []string `json:"clusters,omitempty" protobuf:"bytes,6,opt,name=clusters"` // ManualSync enables manual syncs when they would otherwise be blocked ManualSync bool `json:"manualSync,omitempty" protobuf:"bytes,7,opt,name=manualSync"` - //TimeZone of the sync that will be applied to the schedule + // TimeZone of the sync that will be applied to the schedule TimeZone string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` } @@ -2049,6 +2340,10 @@ func (w *SyncWindows) CanSync(isManual bool) bool { } } + if active.hasAllow() { + return true + } + inactiveAllows := w.InactiveAllows() if inactiveAllows.HasWindows() { if isManual && inactiveAllows.manualEnabled() { @@ -2323,6 +2618,13 @@ func (app *Application) GetPropagationPolicy() string { return "" } +// HasChangedManagedNamespaceMetadata checks whether app.Spec.SyncPolicy.ManagedNamespaceMetadata differs from the +// managed namespace metadata which has been stored app.Status.OperationState.SyncResult. If they differ a refresh should +// be triggered. +func (app *Application) HasChangedManagedNamespaceMetadata() bool { + return app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.ManagedNamespaceMetadata != nil && app.Status.OperationState != nil && app.Status.OperationState.SyncResult != nil && !reflect.DeepEqual(app.Spec.SyncPolicy.ManagedNamespaceMetadata, app.Status.OperationState.SyncResult.ManagedNamespaceMetadata) +} + // IsFinalizerPresent checks if the app has a given finalizer func (app *Application) IsFinalizerPresent(finalizer string) bool { return getFinalizerIndex(app.ObjectMeta, finalizer) > -1 @@ -2394,8 +2696,23 @@ func (condition *ApplicationCondition) IsError() bool { } // Equals compares two instances of ApplicationSource and return true if instances are equal. -func (source *ApplicationSource) Equals(other ApplicationSource) bool { - return reflect.DeepEqual(*source, other) +func (source *ApplicationSource) Equals(other *ApplicationSource) bool { + if source == nil && other == nil { + return true + } + if source == nil || other == nil { + return false + } + if !source.Plugin.Equals(other.Plugin) { + return false + } + // reflect.DeepEqual works fine for the other fields. Since the plugin fields are equal, set them to null so they're + // not considered in the DeepEqual comparison. + sourceCopy := source.DeepCopy() + otherCopy := other.DeepCopy() + sourceCopy.Plugin = nil + otherCopy.Plugin = nil + return reflect.DeepEqual(sourceCopy, otherCopy) } // ExplicitType returns the type (e.g. Helm, Kustomize, etc) of the application. If either none or multiple types are defined, returns an error. 
@@ -2540,7 +2857,7 @@ func SetK8SConfigDefaults(config *rest.Config) error { func (c *Cluster) RawRestConfig() *rest.Config { var config *rest.Config var err error - if c.Server == KubernetesInternalAPIServerAddr && os.Getenv(EnvVarFakeInClusterConfig) == "true" { + if c.Server == KubernetesInternalAPIServerAddr && env.ParseBoolFromEnv(EnvVarFakeInClusterConfig, false) { conf, exists := os.LookupEnv("KUBECONFIG") if exists { config, err = clientcmd.BuildConfigFromFlags("", conf) @@ -2709,5 +3026,5 @@ func (a *Application) QualifiedName() string { // RBACName returns the full qualified RBAC resource name for the application // in a backwards-compatible way. func (a *Application) RBACName(defaultNS string) string { - return security.AppRBACName(defaultNS, a.Spec.GetProject(), a.Namespace, a.Name) + return security.RBACName(defaultNS, a.Spec.GetProject(), a.Namespace, a.Name) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go new file mode 100644 index 0000000000..942e2a651c --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/values.go @@ -0,0 +1,61 @@ +package v1alpha1 + +import ( + "encoding/json" + "fmt" + reflect "reflect" + "strings" + + runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" +) + +// Set the ValuesObject property to the json representation of the yaml contained in value +// Remove Values property if present +func (h *ApplicationSourceHelm) SetValuesString(value string) error { + if value == "" { + h.ValuesObject = nil + h.Values = "" + } else { + data, err := yaml.YAMLToJSON([]byte(value)) + if err != nil { + return fmt.Errorf("failed converting yaml to json: %v", err) + } + var v interface{} + if err := json.Unmarshal(data, &v); err != nil { + return fmt.Errorf("failed to unmarshal json: %v", err) + } + switch v.(type) { + case string: + case map[string]interface{}: + default: + return fmt.Errorf("invalid type %q", reflect.TypeOf(v)) + } + h.ValuesObject = &runtime.RawExtension{Raw: data} + h.Values = "" + } + return nil +} + +func (h *ApplicationSourceHelm) ValuesYAML() []byte { + if h.ValuesObject == nil || h.ValuesObject.Raw == nil { + return []byte(h.Values) + } + b, err := yaml.JSONToYAML(h.ValuesObject.Raw) + if err != nil { + // This should be impossible, because rawValue isn't set directly. + return []byte{} + } + return b +} + +func (h *ApplicationSourceHelm) ValuesIsEmpty() bool { + return len(h.ValuesYAML()) == 0 +} + +func (h *ApplicationSourceHelm) ValuesString() string { + if h.ValuesObject == nil || h.ValuesObject.Raw == nil { + return h.Values + } + return strings.TrimSuffix(string(h.ValuesYAML()), "\n") +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go index 15cc07f3c1..8d10b219f0 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go @@ -315,6 +315,27 @@ func (in *ApplicationMatchExpression) DeepCopy() *ApplicationMatchExpression { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApplicationPreservedFields) DeepCopyInto(out *ApplicationPreservedFields) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationPreservedFields. +func (in *ApplicationPreservedFields) DeepCopy() *ApplicationPreservedFields { + if in == nil { + return nil + } + out := new(ApplicationPreservedFields) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApplicationSet) DeepCopyInto(out *ApplicationSet) { *out = *in @@ -431,6 +452,11 @@ func (in *ApplicationSetGenerator) DeepCopyInto(out *ApplicationSetGenerator) { *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.Plugin != nil { + in, out := &in.Plugin, &out.Plugin + *out = new(PluginGenerator) + (*in).DeepCopyInto(*out) + } return } @@ -525,6 +551,11 @@ func (in *ApplicationSetNestedGenerator) DeepCopyInto(out *ApplicationSetNestedG *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.Plugin != nil { + in, out := &in.Plugin, &out.Plugin + *out = new(PluginGenerator) + (*in).DeepCopyInto(*out) + } return } @@ -625,13 +656,23 @@ func (in *ApplicationSetSpec) DeepCopyInto(out *ApplicationSetSpec) { if in.SyncPolicy != nil { in, out := &in.SyncPolicy, &out.SyncPolicy *out = new(ApplicationSetSyncPolicy) - **out = **in + (*in).DeepCopyInto(*out) } if in.Strategy != nil { in, out := &in.Strategy, &out.Strategy *out = new(ApplicationSetStrategy) (*in).DeepCopyInto(*out) } + if in.PreservedFields != nil { + in, out := &in.PreservedFields, &out.PreservedFields + *out = new(ApplicationPreservedFields) + (*in).DeepCopyInto(*out) + } + if in.GoTemplateOptions != nil { + in, out := &in.GoTemplateOptions, &out.GoTemplateOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -699,6 +740,11 @@ func (in *ApplicationSetStrategy) DeepCopy() *ApplicationSetStrategy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ApplicationSetSyncPolicy) DeepCopyInto(out *ApplicationSetSyncPolicy) { *out = *in + if in.ApplicationsSync != nil { + in, out := &in.ApplicationsSync, &out.ApplicationsSync + *out = new(ApplicationsSyncPolicy) + **out = **in + } return } @@ -798,6 +844,16 @@ func (in *ApplicationSetTerminalGenerator) DeepCopyInto(out *ApplicationSetTermi *out = new(PullRequestGenerator) (*in).DeepCopyInto(*out) } + if in.Plugin != nil { + in, out := &in.Plugin, &out.Plugin + *out = new(PluginGenerator) + (*in).DeepCopyInto(*out) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } return } @@ -904,6 +960,11 @@ func (in *ApplicationSourceHelm) DeepCopyInto(out *ApplicationSourceHelm) { *out = make([]HelmFileParameter, len(*in)) copy(*out, *in) } + if in.ValuesObject != nil { + in, out := &in.ValuesObject, &out.ValuesObject + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } return } @@ -970,6 +1031,11 @@ func (in *ApplicationSourceKustomize) DeepCopyInto(out *ApplicationSourceKustomi (*out)[key] = val } } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = make(KustomizeReplicas, len(*in)) + copy(*out, *in) + } return } @@ -1025,17 +1091,15 @@ func (in *ApplicationSourcePluginParameter) DeepCopyInto(out *ApplicationSourceP *out = new(string) **out = **in } - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } + if in.OptionalMap != nil { + in, out := &in.OptionalMap, &out.OptionalMap + *out = new(OptionalMap) + (*in).DeepCopyInto(*out) } - if in.Array != nil { - in, out := &in.Array, &out.Array - *out = make([]string, len(*in)) - copy(*out, *in) + if in.OptionalArray != nil { + in, out := &in.OptionalArray, &out.OptionalArray + *out = new(OptionalArray) + (*in).DeepCopyInto(*out) } return } @@ -1110,7 +1174,7 @@ func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { } if in.IgnoreDifferences != nil { in, out := &in.IgnoreDifferences, &out.IgnoreDifferences - *out = make([]ResourceIgnoreDifferences, len(*in)) + *out = make(IgnoreDifferences, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1325,6 +1389,48 @@ func (in *BasicAuthBitbucketServer) DeepCopy() *BasicAuthBitbucketServer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BearerTokenBitbucketCloud) DeepCopyInto(out *BearerTokenBitbucketCloud) { + *out = *in + if in.TokenRef != nil { + in, out := &in.TokenRef, &out.TokenRef + *out = new(SecretRef) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BearerTokenBitbucketCloud. +func (in *BearerTokenBitbucketCloud) DeepCopy() *BearerTokenBitbucketCloud { + if in == nil { + return nil + } + out := new(BearerTokenBitbucketCloud) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ChartDetails) DeepCopyInto(out *ChartDetails) { + *out = *in + if in.Maintainers != nil { + in, out := &in.Maintainers, &out.Maintainers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartDetails. 
+func (in *ChartDetails) DeepCopy() *ChartDetails { + if in == nil { + return nil + } + out := new(ChartDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in @@ -1529,6 +1635,13 @@ func (in *ComparedTo) DeepCopyInto(out *ComparedTo) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.IgnoreDifferences != nil { + in, out := &in.IgnoreDifferences, &out.IgnoreDifferences + *out = make(IgnoreDifferences, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1751,6 +1864,13 @@ func (in *GitGenerator) DeepCopyInto(out *GitGenerator) { **out = **in } in.Template.DeepCopyInto(&out.Template) + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -1909,6 +2029,28 @@ func (in *HostResourceInfo) DeepCopy() *HostResourceInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in IgnoreDifferences) DeepCopyInto(out *IgnoreDifferences) { + { + in := &in + *out = make(IgnoreDifferences, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IgnoreDifferences. +func (in IgnoreDifferences) DeepCopy() IgnoreDifferences { + if in == nil { + return nil + } + out := new(IgnoreDifferences) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Info) DeepCopyInto(out *Info) { *out = *in @@ -2046,6 +2188,43 @@ func (in *KustomizeOptions) DeepCopy() *KustomizeOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KustomizeReplica) DeepCopyInto(out *KustomizeReplica) { + *out = *in + out.Count = in.Count + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizeReplica. +func (in *KustomizeReplica) DeepCopy() *KustomizeReplica { + if in == nil { + return nil + } + out := new(KustomizeReplica) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in KustomizeReplicas) DeepCopyInto(out *KustomizeReplicas) { + { + in := &in + *out = make(KustomizeReplicas, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizeReplicas. +func (in KustomizeReplicas) DeepCopy() KustomizeReplicas { + if in == nil { + return nil + } + out := new(KustomizeReplicas) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListGenerator) DeepCopyInto(out *ListGenerator) { *out = *in @@ -2281,6 +2460,50 @@ func (in *OperationState) DeepCopy() *OperationState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OptionalArray) DeepCopyInto(out *OptionalArray) { + *out = *in + if in.Array != nil { + in, out := &in.Array, &out.Array + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalArray. +func (in *OptionalArray) DeepCopy() *OptionalArray { + if in == nil { + return nil + } + out := new(OptionalArray) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionalMap) DeepCopyInto(out *OptionalMap) { + *out = *in + if in.Map != nil { + in, out := &in.Map, &out.Map + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalMap. +func (in *OptionalMap) DeepCopy() *OptionalMap { + if in == nil { + return nil + } + out := new(OptionalMap) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OrphanedResourceKey) DeepCopyInto(out *OrphanedResourceKey) { *out = *in @@ -2354,6 +2577,98 @@ func (in *OverrideIgnoreDiff) DeepCopy() *OverrideIgnoreDiff { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginConfigMapRef) DeepCopyInto(out *PluginConfigMapRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfigMapRef. +func (in *PluginConfigMapRef) DeepCopy() *PluginConfigMapRef { + if in == nil { + return nil + } + out := new(PluginConfigMapRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginGenerator) DeepCopyInto(out *PluginGenerator) { + *out = *in + out.ConfigMapRef = in.ConfigMapRef + in.Input.DeepCopyInto(&out.Input) + if in.RequeueAfterSeconds != nil { + in, out := &in.RequeueAfterSeconds, &out.RequeueAfterSeconds + *out = new(int64) + **out = **in + } + in.Template.DeepCopyInto(&out.Template) + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginGenerator. +func (in *PluginGenerator) DeepCopy() *PluginGenerator { + if in == nil { + return nil + } + out := new(PluginGenerator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginInput) DeepCopyInto(out *PluginInput) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(PluginParameters, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginInput. +func (in *PluginInput) DeepCopy() *PluginInput { + if in == nil { + return nil + } + out := new(PluginInput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in PluginParameters) DeepCopyInto(out *PluginParameters) { + { + in := &in + *out = make(PluginParameters, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginParameters. +func (in PluginParameters) DeepCopy() PluginParameters { + if in == nil { + return nil + } + out := new(PluginParameters) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectRole) DeepCopyInto(out *ProjectRole) { *out = *in @@ -2421,6 +2736,16 @@ func (in *PullRequestGenerator) DeepCopyInto(out *PullRequestGenerator) { **out = **in } in.Template.DeepCopyInto(&out.Template) + if in.Bitbucket != nil { + in, out := &in.Bitbucket, &out.Bitbucket + *out = new(PullRequestGeneratorBitbucket) + (*in).DeepCopyInto(*out) + } + if in.AzureDevOps != nil { + in, out := &in.AzureDevOps, &out.AzureDevOps + *out = new(PullRequestGeneratorAzureDevOps) + (*in).DeepCopyInto(*out) + } return } @@ -2434,6 +2759,58 @@ func (in *PullRequestGenerator) DeepCopy() *PullRequestGenerator { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PullRequestGeneratorAzureDevOps) DeepCopyInto(out *PullRequestGeneratorAzureDevOps) { + *out = *in + if in.TokenRef != nil { + in, out := &in.TokenRef, &out.TokenRef + *out = new(SecretRef) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestGeneratorAzureDevOps. +func (in *PullRequestGeneratorAzureDevOps) DeepCopy() *PullRequestGeneratorAzureDevOps { + if in == nil { + return nil + } + out := new(PullRequestGeneratorAzureDevOps) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PullRequestGeneratorBitbucket) DeepCopyInto(out *PullRequestGeneratorBitbucket) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthBitbucketServer) + (*in).DeepCopyInto(*out) + } + if in.BearerToken != nil { + in, out := &in.BearerToken, &out.BearerToken + *out = new(BearerTokenBitbucketCloud) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestGeneratorBitbucket. +func (in *PullRequestGeneratorBitbucket) DeepCopy() *PullRequestGeneratorBitbucket { + if in == nil { + return nil + } + out := new(PullRequestGeneratorBitbucket) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PullRequestGeneratorBitbucketServer) DeepCopyInto(out *PullRequestGeneratorBitbucketServer) { *out = *in @@ -2463,6 +2840,11 @@ func (in *PullRequestGeneratorFilter) DeepCopyInto(out *PullRequestGeneratorFilt *out = new(string) **out = **in } + if in.TargetBranchMatch != nil { + in, out := &in.TargetBranchMatch, &out.TargetBranchMatch + *out = new(string) + **out = **in + } return } @@ -2968,6 +3350,7 @@ func (in *ResourceNode) DeepCopy() *ResourceNode { func (in *ResourceOverride) DeepCopyInto(out *ResourceOverride) { *out = *in in.IgnoreDifferences.DeepCopyInto(&out.IgnoreDifferences) + in.IgnoreResourceUpdates.DeepCopyInto(&out.IgnoreResourceUpdates) if in.KnownTypeFields != nil { in, out := &in.KnownTypeFields, &out.KnownTypeFields *out = make([]KnownTypeField, len(*in)) @@ -3210,6 +3593,18 @@ func (in *SCMProviderGenerator) DeepCopyInto(out *SCMProviderGenerator) { **out = **in } in.Template.DeepCopyInto(&out.Template) + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AWSCodeCommit != nil { + in, out := &in.AWSCodeCommit, &out.AWSCodeCommit + *out = new(SCMProviderGeneratorAWSCodeCommit) + (*in).DeepCopyInto(*out) + } return } @@ -3223,6 +3618,33 @@ func (in *SCMProviderGenerator) DeepCopy() *SCMProviderGenerator { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SCMProviderGeneratorAWSCodeCommit) DeepCopyInto(out *SCMProviderGeneratorAWSCodeCommit) { + *out = *in + if in.TagFilters != nil { + in, out := &in.TagFilters, &out.TagFilters + *out = make([]*TagFilter, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(TagFilter) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SCMProviderGeneratorAWSCodeCommit. +func (in *SCMProviderGeneratorAWSCodeCommit) DeepCopy() *SCMProviderGeneratorAWSCodeCommit { + if in == nil { + return nil + } + out := new(SCMProviderGeneratorAWSCodeCommit) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SCMProviderGeneratorAzureDevOps) DeepCopyInto(out *SCMProviderGeneratorAzureDevOps) { *out = *in @@ -3518,6 +3940,11 @@ func (in *SyncOperationResult) DeepCopyInto(out *SyncOperationResult) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ManagedNamespaceMetadata != nil { + in, out := &in.ManagedNamespaceMetadata, &out.ManagedNamespaceMetadata + *out = new(ManagedNamespaceMetadata) + (*in).DeepCopyInto(*out) + } return } @@ -3771,3 +4198,19 @@ func (in *TLSClientConfig) DeepCopy() *TLSClientConfig { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagFilter) DeepCopyInto(out *TagFilter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagFilter. 
+func (in *TagFilter) DeepCopy() *TagFilter { + if in == nil { + return nil + } + out := new(TagFilter) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go index 1ef95466cd..417dc758ef 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go +++ b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/clientset.go @@ -3,12 +3,12 @@ package apiclient import ( "crypto/tls" "crypto/x509" + "fmt" "time" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" log "github.com/sirupsen/logrus" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -17,6 +17,8 @@ import ( "github.com/argoproj/argo-cd/v2/util/io" ) +//go:generate go run github.com/vektra/mockery/v2@v2.15.0 --name=RepoServerServiceClient + const ( // MaxGRPCMessageSize contains max grpc message size MaxGRPCMessageSize = 100 * 1024 * 1024 @@ -46,7 +48,7 @@ type clientSet struct { func (c *clientSet) NewRepoServerClient() (io.Closer, RepoServerServiceClient, error) { conn, err := NewConnection(c.address, c.timeoutSeconds, &c.tlsConfig) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to open a new connection to repo server: %w", err) } return conn, NewRepoServerServiceClient(conn), nil } @@ -64,8 +66,8 @@ func NewConnection(address string, timeoutSeconds int, tlsConfig *TLSConfigurati grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)), grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryInterceptors...)), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxGRPCMessageSize), grpc.MaxCallSendMsgSize(MaxGRPCMessageSize)), - grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), - grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()), + grpc.WithUnaryInterceptor(argogrpc.OTELUnaryClientInterceptor()), + grpc.WithStreamInterceptor(argogrpc.OTELStreamClientInterceptor()), } tlsC := &tls.Config{} diff --git a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go index f198076769..dd5a4559ac 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go @@ -36,14 +36,15 @@ type ManifestRequest struct { NoCache bool `protobuf:"varint,3,opt,name=noCache,proto3" json:"noCache,omitempty"` AppLabelKey string `protobuf:"bytes,4,opt,name=appLabelKey,proto3" json:"appLabelKey,omitempty"` // Name of the application for which the request is triggered - AppName string `protobuf:"bytes,5,opt,name=appName,proto3" json:"appName,omitempty"` - Namespace string `protobuf:"bytes,8,opt,name=namespace,proto3" json:"namespace,omitempty"` - ApplicationSource *v1alpha1.ApplicationSource `protobuf:"bytes,10,opt,name=applicationSource,proto3" json:"applicationSource,omitempty"` - Repos []*v1alpha1.Repository `protobuf:"bytes,11,rep,name=repos,proto3" json:"repos,omitempty"` - Plugins []*v1alpha1.ConfigManagementPlugin `protobuf:"bytes,12,rep,name=plugins,proto3" json:"plugins,omitempty"` - KustomizeOptions *v1alpha1.KustomizeOptions `protobuf:"bytes,13,opt,name=kustomizeOptions,proto3" 
json:"kustomizeOptions,omitempty"` - KubeVersion string `protobuf:"bytes,14,opt,name=kubeVersion,proto3" json:"kubeVersion,omitempty"` - ApiVersions []string `protobuf:"bytes,15,rep,name=apiVersions,proto3" json:"apiVersions,omitempty"` + AppName string `protobuf:"bytes,5,opt,name=appName,proto3" json:"appName,omitempty"` + Namespace string `protobuf:"bytes,8,opt,name=namespace,proto3" json:"namespace,omitempty"` + ApplicationSource *v1alpha1.ApplicationSource `protobuf:"bytes,10,opt,name=applicationSource,proto3" json:"applicationSource,omitempty"` + Repos []*v1alpha1.Repository `protobuf:"bytes,11,rep,name=repos,proto3" json:"repos,omitempty"` + // Deprecated: use sidecar plugins instead. + Plugins []*v1alpha1.ConfigManagementPlugin `protobuf:"bytes,12,rep,name=plugins,proto3" json:"plugins,omitempty"` + KustomizeOptions *v1alpha1.KustomizeOptions `protobuf:"bytes,13,opt,name=kustomizeOptions,proto3" json:"kustomizeOptions,omitempty"` + KubeVersion string `protobuf:"bytes,14,opt,name=kubeVersion,proto3" json:"kubeVersion,omitempty"` + ApiVersions []string `protobuf:"bytes,15,rep,name=apiVersions,proto3" json:"apiVersions,omitempty"` // Request to verify the signature when generating the manifests (only for Git repositories) VerifySignature bool `protobuf:"varint,16,opt,name=verifySignature,proto3" json:"verifySignature,omitempty"` HelmRepoCreds []*v1alpha1.RepoCreds `protobuf:"bytes,17,rep,name=helmRepoCreds,proto3" json:"helmRepoCreds,omitempty"` @@ -1327,6 +1328,72 @@ func (m *RepoServerRevisionMetadataRequest) GetCheckSignature() bool { return false } +type RepoServerRevisionChartDetailsRequest struct { + // the repo + Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"` + // the chart + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // the revision within the chart + Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RepoServerRevisionChartDetailsRequest) Reset() { *m = RepoServerRevisionChartDetailsRequest{} } +func (m *RepoServerRevisionChartDetailsRequest) String() string { return proto.CompactTextString(m) } +func (*RepoServerRevisionChartDetailsRequest) ProtoMessage() {} +func (*RepoServerRevisionChartDetailsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{18} +} +func (m *RepoServerRevisionChartDetailsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepoServerRevisionChartDetailsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RepoServerRevisionChartDetailsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RepoServerRevisionChartDetailsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepoServerRevisionChartDetailsRequest.Merge(m, src) +} +func (m *RepoServerRevisionChartDetailsRequest) XXX_Size() int { + return m.Size() +} +func (m *RepoServerRevisionChartDetailsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RepoServerRevisionChartDetailsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RepoServerRevisionChartDetailsRequest proto.InternalMessageInfo + +func (m *RepoServerRevisionChartDetailsRequest) GetRepo() *v1alpha1.Repository { + if m != nil { + return m.Repo + } + return 
nil +} + +func (m *RepoServerRevisionChartDetailsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RepoServerRevisionChartDetailsRequest) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + // HelmAppSpec contains helm app name in source repo type HelmAppSpec struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1346,7 +1413,7 @@ func (m *HelmAppSpec) Reset() { *m = HelmAppSpec{} } func (m *HelmAppSpec) String() string { return proto.CompactTextString(m) } func (*HelmAppSpec) ProtoMessage() {} func (*HelmAppSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{18} + return fileDescriptor_dd8723cfcc820480, []int{19} } func (m *HelmAppSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1423,7 +1490,7 @@ func (m *KustomizeAppSpec) Reset() { *m = KustomizeAppSpec{} } func (m *KustomizeAppSpec) String() string { return proto.CompactTextString(m) } func (*KustomizeAppSpec) ProtoMessage() {} func (*KustomizeAppSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{19} + return fileDescriptor_dd8723cfcc820480, []int{20} } func (m *KustomizeAppSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1470,7 +1537,7 @@ func (m *DirectoryAppSpec) Reset() { *m = DirectoryAppSpec{} } func (m *DirectoryAppSpec) String() string { return proto.CompactTextString(m) } func (*DirectoryAppSpec) ProtoMessage() {} func (*DirectoryAppSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{20} + return fileDescriptor_dd8723cfcc820480, []int{21} } func (m *DirectoryAppSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1530,7 +1597,7 @@ func (m *ParameterAnnouncement) Reset() { *m = ParameterAnnouncement{} } func (m *ParameterAnnouncement) String() string { return proto.CompactTextString(m) } func (*ParameterAnnouncement) ProtoMessage() {} func (*ParameterAnnouncement) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{21} + return fileDescriptor_dd8723cfcc820480, []int{22} } func (m *ParameterAnnouncement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1634,7 +1701,7 @@ func (m *PluginAppSpec) Reset() { *m = PluginAppSpec{} } func (m *PluginAppSpec) String() string { return proto.CompactTextString(m) } func (*PluginAppSpec) ProtoMessage() {} func (*PluginAppSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{22} + return fileDescriptor_dd8723cfcc820480, []int{23} } func (m *PluginAppSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1681,7 +1748,7 @@ func (m *HelmChartsRequest) Reset() { *m = HelmChartsRequest{} } func (m *HelmChartsRequest) String() string { return proto.CompactTextString(m) } func (*HelmChartsRequest) ProtoMessage() {} func (*HelmChartsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{23} + return fileDescriptor_dd8723cfcc820480, []int{24} } func (m *HelmChartsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1729,7 +1796,7 @@ func (m *HelmChart) Reset() { *m = HelmChart{} } func (m *HelmChart) String() string { return proto.CompactTextString(m) } func (*HelmChart) ProtoMessage() {} func (*HelmChart) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{24} + return fileDescriptor_dd8723cfcc820480, []int{25} } func (m *HelmChart) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1783,7 +1850,7 @@ func (m 
*HelmChartsResponse) Reset() { *m = HelmChartsResponse{} } func (m *HelmChartsResponse) String() string { return proto.CompactTextString(m) } func (*HelmChartsResponse) ProtoMessage() {} func (*HelmChartsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_dd8723cfcc820480, []int{25} + return fileDescriptor_dd8723cfcc820480, []int{26} } func (m *HelmChartsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1819,6 +1886,260 @@ func (m *HelmChartsResponse) GetItems() []*HelmChart { return nil } +type GitFilesRequest struct { + Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"` + SubmoduleEnabled bool `protobuf:"varint,2,opt,name=submoduleEnabled,proto3" json:"submoduleEnabled,omitempty"` + Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` + NewGitFileGlobbingEnabled bool `protobuf:"varint,5,opt,name=NewGitFileGlobbingEnabled,proto3" json:"NewGitFileGlobbingEnabled,omitempty"` + NoRevisionCache bool `protobuf:"varint,6,opt,name=noRevisionCache,proto3" json:"noRevisionCache,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GitFilesRequest) Reset() { *m = GitFilesRequest{} } +func (m *GitFilesRequest) String() string { return proto.CompactTextString(m) } +func (*GitFilesRequest) ProtoMessage() {} +func (*GitFilesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{27} +} +func (m *GitFilesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitFilesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GitFilesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GitFilesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitFilesRequest.Merge(m, src) +} +func (m *GitFilesRequest) XXX_Size() int { + return m.Size() +} +func (m *GitFilesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GitFilesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GitFilesRequest proto.InternalMessageInfo + +func (m *GitFilesRequest) GetRepo() *v1alpha1.Repository { + if m != nil { + return m.Repo + } + return nil +} + +func (m *GitFilesRequest) GetSubmoduleEnabled() bool { + if m != nil { + return m.SubmoduleEnabled + } + return false +} + +func (m *GitFilesRequest) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + +func (m *GitFilesRequest) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *GitFilesRequest) GetNewGitFileGlobbingEnabled() bool { + if m != nil { + return m.NewGitFileGlobbingEnabled + } + return false +} + +func (m *GitFilesRequest) GetNoRevisionCache() bool { + if m != nil { + return m.NoRevisionCache + } + return false +} + +type GitFilesResponse struct { + // Map consisting of path of the path to its contents in bytes + Map map[string][]byte `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GitFilesResponse) Reset() { *m = GitFilesResponse{} } +func (m *GitFilesResponse) String() string { return 
proto.CompactTextString(m) } +func (*GitFilesResponse) ProtoMessage() {} +func (*GitFilesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{28} +} +func (m *GitFilesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitFilesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GitFilesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GitFilesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitFilesResponse.Merge(m, src) +} +func (m *GitFilesResponse) XXX_Size() int { + return m.Size() +} +func (m *GitFilesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GitFilesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GitFilesResponse proto.InternalMessageInfo + +func (m *GitFilesResponse) GetMap() map[string][]byte { + if m != nil { + return m.Map + } + return nil +} + +type GitDirectoriesRequest struct { + Repo *v1alpha1.Repository `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"` + SubmoduleEnabled bool `protobuf:"varint,2,opt,name=submoduleEnabled,proto3" json:"submoduleEnabled,omitempty"` + Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` + NoRevisionCache bool `protobuf:"varint,4,opt,name=noRevisionCache,proto3" json:"noRevisionCache,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GitDirectoriesRequest) Reset() { *m = GitDirectoriesRequest{} } +func (m *GitDirectoriesRequest) String() string { return proto.CompactTextString(m) } +func (*GitDirectoriesRequest) ProtoMessage() {} +func (*GitDirectoriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{29} +} +func (m *GitDirectoriesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitDirectoriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GitDirectoriesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GitDirectoriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitDirectoriesRequest.Merge(m, src) +} +func (m *GitDirectoriesRequest) XXX_Size() int { + return m.Size() +} +func (m *GitDirectoriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GitDirectoriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GitDirectoriesRequest proto.InternalMessageInfo + +func (m *GitDirectoriesRequest) GetRepo() *v1alpha1.Repository { + if m != nil { + return m.Repo + } + return nil +} + +func (m *GitDirectoriesRequest) GetSubmoduleEnabled() bool { + if m != nil { + return m.SubmoduleEnabled + } + return false +} + +func (m *GitDirectoriesRequest) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + +func (m *GitDirectoriesRequest) GetNoRevisionCache() bool { + if m != nil { + return m.NoRevisionCache + } + return false +} + +type GitDirectoriesResponse struct { + // A set of directory paths + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GitDirectoriesResponse) Reset() { *m = 
GitDirectoriesResponse{} } +func (m *GitDirectoriesResponse) String() string { return proto.CompactTextString(m) } +func (*GitDirectoriesResponse) ProtoMessage() {} +func (*GitDirectoriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_dd8723cfcc820480, []int{30} +} +func (m *GitDirectoriesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitDirectoriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GitDirectoriesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GitDirectoriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitDirectoriesResponse.Merge(m, src) +} +func (m *GitDirectoriesResponse) XXX_Size() int { + return m.Size() +} +func (m *GitDirectoriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GitDirectoriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GitDirectoriesResponse proto.InternalMessageInfo + +func (m *GitDirectoriesResponse) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + func init() { proto.RegisterType((*ManifestRequest)(nil), "repository.ManifestRequest") proto.RegisterMapType((map[string]bool)(nil), "repository.ManifestRequest.EnabledSourceTypesEntry") @@ -1844,6 +2165,7 @@ func init() { proto.RegisterMapType((map[string]*v1alpha1.RefTarget)(nil), "repository.RepoServerAppDetailsQuery.RefSourcesEntry") proto.RegisterType((*RepoAppDetailsResponse)(nil), "repository.RepoAppDetailsResponse") proto.RegisterType((*RepoServerRevisionMetadataRequest)(nil), "repository.RepoServerRevisionMetadataRequest") + proto.RegisterType((*RepoServerRevisionChartDetailsRequest)(nil), "repository.RepoServerRevisionChartDetailsRequest") proto.RegisterType((*HelmAppSpec)(nil), "repository.HelmAppSpec") proto.RegisterType((*KustomizeAppSpec)(nil), "repository.KustomizeAppSpec") proto.RegisterType((*DirectoryAppSpec)(nil), "repository.DirectoryAppSpec") @@ -1853,6 +2175,11 @@ func init() { proto.RegisterType((*HelmChartsRequest)(nil), "repository.HelmChartsRequest") proto.RegisterType((*HelmChart)(nil), "repository.HelmChart") proto.RegisterType((*HelmChartsResponse)(nil), "repository.HelmChartsResponse") + proto.RegisterType((*GitFilesRequest)(nil), "repository.GitFilesRequest") + proto.RegisterType((*GitFilesResponse)(nil), "repository.GitFilesResponse") + proto.RegisterMapType((map[string][]byte)(nil), "repository.GitFilesResponse.MapEntry") + proto.RegisterType((*GitDirectoriesRequest)(nil), "repository.GitDirectoriesRequest") + proto.RegisterType((*GitDirectoriesResponse)(nil), "repository.GitDirectoriesResponse") } func init() { @@ -1860,124 +2187,138 @@ func init() { } var fileDescriptor_dd8723cfcc820480 = []byte{ - // 1870 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x19, 0xdb, 0x6e, 0x1c, 0x49, - 0xd5, 0x73, 0xb1, 0x3d, 0x73, 0x9c, 0xf8, 0x52, 0x49, 0x9c, 0xce, 0x6c, 0xd6, 0xf2, 0x36, 0x10, - 0x99, 0xcd, 0x6e, 0x8f, 0xe2, 0x68, 0x77, 0x51, 0x16, 0x16, 0x79, 0xbd, 0x49, 0x1c, 0x25, 0x4e, - 0x4c, 0x27, 0x80, 0x16, 0x02, 0xa8, 0xdc, 0x53, 0xd3, 0x53, 0x3b, 0x7d, 0xa9, 0x74, 0x57, 0x0f, - 0x9a, 0x48, 0x3c, 0x20, 0x21, 0x24, 0x7e, 0x00, 0xf1, 0x27, 0x3c, 0xf2, 0xc4, 0xe5, 0x11, 0xf1, - 0x03, 0xa0, 0x7c, 0x09, 0xaa, 0x4b, 0x5f, 0xa7, 0xed, 0x64, 0x35, 0x8e, 0xf7, 0x61, 0x5f, 0xec, - 0xae, 0x53, 0xe7, 0x56, 0xa7, 0xce, 0xb5, 0x06, 0x6e, 
0x44, 0x84, 0x85, 0x31, 0x89, 0x26, 0x24, - 0xea, 0xcb, 0x4f, 0xca, 0xc3, 0x68, 0x5a, 0xf8, 0xb4, 0x58, 0x14, 0xf2, 0x10, 0x41, 0x0e, 0xe9, - 0x3d, 0x72, 0x29, 0x1f, 0x25, 0xc7, 0x96, 0x13, 0xfa, 0x7d, 0x1c, 0xb9, 0x21, 0x8b, 0xc2, 0xaf, - 0xe4, 0xc7, 0x87, 0xce, 0xa0, 0x3f, 0xd9, 0xed, 0xb3, 0xb1, 0xdb, 0xc7, 0x8c, 0xc6, 0x7d, 0xcc, - 0x98, 0x47, 0x1d, 0xcc, 0x69, 0x18, 0xf4, 0x27, 0xb7, 0xb0, 0xc7, 0x46, 0xf8, 0x56, 0xdf, 0x25, - 0x01, 0x89, 0x30, 0x27, 0x03, 0xc5, 0xb9, 0xf7, 0x8e, 0x1b, 0x86, 0xae, 0x47, 0xfa, 0x72, 0x75, - 0x9c, 0x0c, 0xfb, 0xc4, 0x67, 0x5c, 0x8b, 0x35, 0xff, 0x72, 0x01, 0xd6, 0x0e, 0x71, 0x40, 0x87, - 0x24, 0xe6, 0x36, 0x79, 0x91, 0x90, 0x98, 0xa3, 0xe7, 0xd0, 0x16, 0xca, 0x18, 0x8d, 0xed, 0xc6, - 0xce, 0xca, 0xee, 0x81, 0x95, 0x6b, 0x63, 0xa5, 0xda, 0xc8, 0x8f, 0xdf, 0x38, 0x03, 0x6b, 0xb2, - 0x6b, 0xb1, 0xb1, 0x6b, 0x09, 0x6d, 0xac, 0x82, 0x36, 0x56, 0xaa, 0x8d, 0x65, 0x67, 0xc7, 0xb2, - 0x25, 0x57, 0xd4, 0x83, 0x4e, 0x44, 0x26, 0x34, 0xa6, 0x61, 0x60, 0x34, 0xb7, 0x1b, 0x3b, 0x5d, - 0x3b, 0x5b, 0x23, 0x03, 0x96, 0x83, 0x70, 0x1f, 0x3b, 0x23, 0x62, 0xb4, 0xb6, 0x1b, 0x3b, 0x1d, - 0x3b, 0x5d, 0xa2, 0x6d, 0x58, 0xc1, 0x8c, 0x3d, 0xc2, 0xc7, 0xc4, 0x7b, 0x48, 0xa6, 0x46, 0x5b, - 0x12, 0x16, 0x41, 0x82, 0x16, 0x33, 0xf6, 0x18, 0xfb, 0xc4, 0x58, 0x94, 0xbb, 0xe9, 0x12, 0x5d, - 0x87, 0x6e, 0x80, 0x7d, 0x12, 0x33, 0xec, 0x10, 0xa3, 0x23, 0xf7, 0x72, 0x00, 0xfa, 0x1d, 0x6c, - 0x14, 0x14, 0x7f, 0x1a, 0x26, 0x91, 0x43, 0x0c, 0x90, 0x47, 0x7f, 0x32, 0xdf, 0xd1, 0xf7, 0xaa, - 0x6c, 0xed, 0x59, 0x49, 0xe8, 0xd7, 0xb0, 0x28, 0x6f, 0xde, 0x58, 0xd9, 0x6e, 0x9d, 0xa9, 0xb5, - 0x15, 0x5b, 0x14, 0xc0, 0x32, 0xf3, 0x12, 0x97, 0x06, 0xb1, 0x71, 0x41, 0x4a, 0x78, 0x36, 0x9f, - 0x84, 0xfd, 0x30, 0x18, 0x52, 0xf7, 0x10, 0x07, 0xd8, 0x25, 0x3e, 0x09, 0xf8, 0x91, 0x64, 0x6e, - 0xa7, 0x42, 0xd0, 0x4b, 0x58, 0x1f, 0x27, 0x31, 0x0f, 0x7d, 0xfa, 0x92, 0x3c, 0x61, 0x82, 0x36, - 0x36, 0x2e, 0x4a, 0x6b, 0x3e, 0x9e, 0x4f, 0xf0, 0xc3, 0x0a, 0x57, 0x7b, 0x46, 0x8e, 0x70, 0x92, - 0x71, 0x72, 0x4c, 0x7e, 0x46, 0x22, 0xe9, 0x5d, 0xab, 0xca, 0x49, 0x0a, 0x20, 0xe5, 0x46, 0x54, - 0xaf, 0x62, 0x63, 0x6d, 0xbb, 0xa5, 0xdc, 0x28, 0x03, 0xa1, 0x1d, 0x58, 0x9b, 0x90, 0x88, 0x0e, - 0xa7, 0x4f, 0xa9, 0x1b, 0x60, 0x9e, 0x44, 0xc4, 0x58, 0x97, 0xae, 0x58, 0x05, 0x23, 0x1f, 0x2e, - 0x8e, 0x88, 0xe7, 0x0b, 0x93, 0xef, 0x47, 0x64, 0x10, 0x1b, 0x1b, 0xd2, 0xbe, 0xf7, 0xe7, 0xbf, - 0x41, 0xc9, 0xce, 0x2e, 0x73, 0x17, 0x8a, 0x05, 0xa1, 0xad, 0x23, 0x45, 0xc5, 0x08, 0x52, 0x8a, - 0x55, 0xc0, 0xe8, 0x06, 0xac, 0xf2, 0x08, 0x3b, 0x63, 0x1a, 0xb8, 0x87, 0x84, 0x8f, 0xc2, 0x81, - 0x71, 0x49, 0x5a, 0xa2, 0x02, 0x45, 0x0e, 0x20, 0x12, 0xe0, 0x63, 0x8f, 0x0c, 0x94, 0x2f, 0x3e, - 0x9b, 0x32, 0x12, 0x1b, 0x97, 0xe5, 0x29, 0x6e, 0x5b, 0x85, 0x0c, 0x55, 0x49, 0x10, 0xd6, 0xdd, - 0x19, 0xaa, 0xbb, 0x01, 0x8f, 0xa6, 0x76, 0x0d, 0x3b, 0x34, 0x86, 0x15, 0x71, 0x8e, 0xd4, 0x15, - 0xae, 0x48, 0x57, 0x78, 0x30, 0x9f, 0x8d, 0x0e, 0x72, 0x86, 0x76, 0x91, 0x3b, 0xb2, 0x00, 0x8d, - 0x70, 0x7c, 0x98, 0x78, 0x9c, 0x32, 0x8f, 0x28, 0x35, 0x62, 0x63, 0x53, 0x9a, 0xa9, 0x66, 0x07, - 0x3d, 0x04, 0x88, 0xc8, 0x30, 0xc5, 0xbb, 0x2a, 0x4f, 0x7e, 0xf3, 0xb4, 0x93, 0xdb, 0x19, 0xb6, - 0x3a, 0x71, 0x81, 0xbc, 0x77, 0x17, 0xae, 0x9e, 0x60, 0x18, 0xb4, 0x0e, 0xad, 0x31, 0x99, 0xca, - 0x84, 0xda, 0xb5, 0xc5, 0x27, 0xba, 0x0c, 0x8b, 0x13, 0xec, 0x25, 0x44, 0xa6, 0xc0, 0x8e, 0xad, - 0x16, 0x77, 0x9a, 0x3f, 0x68, 0xf4, 0xfe, 0xd8, 0x80, 0xb5, 0x8a, 0x98, 0x1a, 0xfa, 0x5f, 0x15, - 0xe9, 0xcf, 0xc0, 0xe9, 0x86, 0xcf, 0x70, 0xe4, 0x12, 0x5e, 0x50, 0xc4, 0xfc, 
0x4f, 0x03, 0x8c, - 0xca, 0xf9, 0x7f, 0x4e, 0xf9, 0xe8, 0x1e, 0xf5, 0x48, 0x8c, 0x3e, 0x81, 0xe5, 0x48, 0xc1, 0x74, - 0x99, 0x78, 0xe7, 0x14, 0xb3, 0x1d, 0x2c, 0xd8, 0x29, 0x36, 0xfa, 0x0c, 0x3a, 0x3e, 0xe1, 0x78, - 0x80, 0x39, 0xd6, 0xba, 0x6f, 0xd7, 0x51, 0x0a, 0x29, 0x87, 0x1a, 0xef, 0x60, 0xc1, 0xce, 0x68, - 0xd0, 0x47, 0xb0, 0xe8, 0x8c, 0x92, 0x60, 0x2c, 0x0b, 0xc4, 0xca, 0xee, 0xbb, 0x27, 0x11, 0xef, - 0x0b, 0xa4, 0x83, 0x05, 0x5b, 0x61, 0x7f, 0xbe, 0x04, 0x6d, 0x86, 0x23, 0x6e, 0xde, 0x83, 0xcb, - 0x75, 0x22, 0x44, 0x55, 0x72, 0x46, 0xc4, 0x19, 0xc7, 0x89, 0xaf, 0xcd, 0x9c, 0xad, 0x11, 0x82, - 0x76, 0x4c, 0x5f, 0x2a, 0x53, 0xb7, 0x6c, 0xf9, 0x6d, 0x7e, 0x1f, 0x36, 0x66, 0xa4, 0x89, 0x4b, - 0x55, 0xba, 0x09, 0x0e, 0x17, 0xb4, 0x68, 0x33, 0x81, 0x2b, 0xcf, 0xa4, 0x2d, 0xb2, 0xd4, 0x7c, - 0x1e, 0x75, 0xd6, 0x3c, 0x80, 0xcd, 0xaa, 0xd8, 0x98, 0x85, 0x41, 0x4c, 0x44, 0x94, 0xc8, 0x5c, - 0x46, 0xc9, 0x20, 0xdf, 0x95, 0x5a, 0x74, 0xec, 0x9a, 0x1d, 0xf3, 0xf7, 0x4d, 0xd8, 0xb4, 0x49, - 0x1c, 0x7a, 0x13, 0x92, 0x26, 0x9a, 0xf3, 0x69, 0x15, 0x7e, 0x09, 0x2d, 0xcc, 0x98, 0x76, 0x93, - 0x07, 0x67, 0x56, 0x8c, 0x6d, 0xc1, 0x15, 0x7d, 0x00, 0x1b, 0xd8, 0x3f, 0xa6, 0x6e, 0x12, 0x26, - 0x71, 0x7a, 0x2c, 0xe9, 0x54, 0x5d, 0x7b, 0x76, 0xc3, 0x74, 0xe0, 0xea, 0x8c, 0x09, 0xb4, 0x39, - 0x8b, 0x0d, 0x4d, 0xa3, 0xd2, 0xd0, 0xd4, 0x0a, 0x69, 0x9e, 0x24, 0xe4, 0x1f, 0x0d, 0x58, 0xcf, - 0x43, 0x47, 0xb3, 0xbf, 0x0e, 0x5d, 0x5f, 0xc3, 0x62, 0xa3, 0x21, 0x0b, 0x56, 0x0e, 0x28, 0xf7, - 0x36, 0xcd, 0x6a, 0x6f, 0xb3, 0x09, 0x4b, 0xaa, 0xf5, 0xd4, 0x07, 0xd3, 0xab, 0x92, 0xca, 0xed, - 0x8a, 0xca, 0x5b, 0x00, 0x71, 0x96, 0xbf, 0x8c, 0x25, 0xb9, 0x5b, 0x80, 0x20, 0x13, 0x2e, 0xa8, - 0x4a, 0x68, 0x93, 0x38, 0xf1, 0xb8, 0xb1, 0x2c, 0x31, 0x4a, 0x30, 0x33, 0x84, 0xb5, 0x47, 0x54, - 0x9c, 0x61, 0x18, 0x9f, 0x8f, 0xb3, 0x7f, 0x0c, 0x6d, 0x21, 0x4c, 0x1c, 0xec, 0x38, 0xc2, 0x81, - 0x33, 0x22, 0xa9, 0xad, 0xb2, 0xb5, 0x08, 0x63, 0x8e, 0xdd, 0xd8, 0x68, 0x4a, 0xb8, 0xfc, 0x36, - 0xff, 0xda, 0x54, 0x9a, 0xee, 0x31, 0x16, 0x7f, 0xf3, 0xed, 0x6f, 0x7d, 0x41, 0x6e, 0xcd, 0x16, - 0xe4, 0x8a, 0xca, 0x5f, 0xa7, 0x20, 0x9f, 0x51, 0x99, 0x32, 0x13, 0x58, 0xde, 0x63, 0x4c, 0x28, - 0x82, 0x6e, 0x41, 0x1b, 0x33, 0xa6, 0x0c, 0x5e, 0xc9, 0xc8, 0x1a, 0x45, 0xfc, 0xd7, 0x2a, 0x49, - 0xd4, 0xde, 0x27, 0xd0, 0xcd, 0x40, 0xaf, 0x13, 0xdb, 0x2d, 0x8a, 0xdd, 0x06, 0x50, 0x1d, 0xe7, - 0x83, 0x60, 0x18, 0x8a, 0x2b, 0x15, 0xce, 0xae, 0x49, 0xe5, 0xb7, 0x79, 0x27, 0xc5, 0x90, 0xba, - 0x7d, 0x00, 0x8b, 0x94, 0x13, 0x3f, 0x55, 0x6e, 0xb3, 0xa8, 0x5c, 0xce, 0xc8, 0x56, 0x48, 0xe6, - 0x3f, 0x3b, 0x70, 0x4d, 0xdc, 0xd8, 0x53, 0x19, 0x26, 0x7b, 0x8c, 0x7d, 0x41, 0x38, 0xa6, 0x5e, - 0xfc, 0x93, 0x84, 0x44, 0xd3, 0xb7, 0xec, 0x18, 0x2e, 0x2c, 0xa9, 0x28, 0xd3, 0xf9, 0xee, 0xcc, - 0x87, 0x0f, 0xcd, 0x3e, 0x9f, 0x38, 0x5a, 0x6f, 0x67, 0xe2, 0xa8, 0x9b, 0x00, 0xda, 0xe7, 0x34, - 0x01, 0x9c, 0x3c, 0x04, 0x16, 0x46, 0xcb, 0xa5, 0xf2, 0x68, 0x59, 0xd3, 0x58, 0x2f, 0xbf, 0x69, - 0x63, 0xdd, 0xa9, 0x6d, 0xac, 0xfd, 0xda, 0x38, 0xee, 0x4a, 0x73, 0xff, 0xa8, 0xe8, 0x81, 0x27, - 0xfa, 0xda, 0x3c, 0x2d, 0x36, 0xbc, 0xd5, 0x16, 0xfb, 0xa7, 0xa5, 0x96, 0x59, 0x0d, 0xad, 0x1f, - 0xbd, 0xd9, 0x99, 0xbe, 0x4d, 0xcd, 0xf3, 0x1f, 0x64, 0xcf, 0xc4, 0xc2, 0xdc, 0x06, 0x59, 0x41, - 0x17, 0x75, 0x48, 0x94, 0x56, 0x9d, 0xb4, 0xc4, 0x37, 0xba, 0x09, 0x6d, 0x61, 0x64, 0xdd, 0xd4, - 0x5e, 0x2d, 0xda, 0x53, 0xdc, 0xc4, 0x1e, 0x63, 0x4f, 0x19, 0x71, 0x6c, 0x89, 0x84, 0xee, 0x40, - 0x37, 0x73, 0x7c, 0x1d, 0x59, 0xd7, 0x8b, 0x14, 0x59, 0x9c, 0xa4, 0x64, 0x39, 0xba, 0xa0, 0x1d, - 0xd0, 
0x88, 0x38, 0xb2, 0xe5, 0x5b, 0x9c, 0xa5, 0xfd, 0x22, 0xdd, 0xcc, 0x68, 0x33, 0x74, 0x74, - 0x0b, 0x96, 0xd4, 0x94, 0x2f, 0x23, 0x68, 0x65, 0xf7, 0xda, 0x6c, 0x32, 0x4d, 0xa9, 0x34, 0xa2, - 0xf9, 0xf7, 0x06, 0xbc, 0x97, 0x3b, 0x44, 0x1a, 0x4d, 0x69, 0xd7, 0xfd, 0xcd, 0x57, 0xdc, 0x1b, - 0xb0, 0x2a, 0xdb, 0xfc, 0x7c, 0xd8, 0x57, 0xef, 0x4e, 0x15, 0xa8, 0xf9, 0xb7, 0x26, 0xac, 0x14, - 0x2e, 0xa2, 0xae, 0xf0, 0x88, 0xc6, 0x49, 0xde, 0xbf, 0x1c, 0x90, 0x64, 0x72, 0xed, 0xda, 0x05, - 0x08, 0x1a, 0x03, 0x30, 0x1c, 0x61, 0x9f, 0x70, 0x12, 0x89, 0x8c, 0x28, 0x22, 0xe7, 0xe1, 0xfc, - 0x51, 0x7a, 0x94, 0xf2, 0xb4, 0x0b, 0xec, 0x45, 0xe7, 0x27, 0x45, 0xc7, 0x3a, 0x0f, 0xea, 0x15, - 0xfa, 0x2d, 0xac, 0x0e, 0xa9, 0x47, 0x8e, 0x72, 0x45, 0x96, 0xa4, 0x22, 0x4f, 0xe6, 0x57, 0xe4, - 0x5e, 0x91, 0xaf, 0x5d, 0x11, 0x63, 0xbe, 0x0f, 0xeb, 0x55, 0xbf, 0x14, 0x4a, 0x52, 0x1f, 0xbb, - 0x99, 0xb5, 0xf4, 0xca, 0x44, 0xb0, 0x5e, 0xf5, 0x43, 0xf3, 0xbf, 0x4d, 0xb8, 0x92, 0xb1, 0xdb, - 0x0b, 0x82, 0x30, 0x09, 0x1c, 0xf9, 0x00, 0x55, 0x7b, 0x17, 0x97, 0x61, 0x91, 0x53, 0xee, 0x65, - 0x0d, 0x84, 0x5c, 0x88, 0x1a, 0xc0, 0xc3, 0xd0, 0xe3, 0x94, 0xe9, 0x7e, 0x38, 0x5d, 0x2a, 0x1f, - 0x79, 0x91, 0xd0, 0x88, 0x0c, 0x64, 0x44, 0x75, 0xec, 0x6c, 0x2d, 0xf6, 0x44, 0x77, 0x20, 0xdb, - 0x61, 0x65, 0xcc, 0x6c, 0x2d, 0xfd, 0x27, 0xf4, 0x3c, 0xe2, 0x08, 0x73, 0x14, 0x1a, 0xe6, 0x0a, - 0x54, 0x36, 0xe2, 0x3c, 0xa2, 0x81, 0xab, 0xdb, 0x65, 0xbd, 0x12, 0x7a, 0xe2, 0x28, 0xc2, 0x53, - 0xa3, 0x23, 0x0d, 0xa0, 0x16, 0xe8, 0x87, 0xd0, 0xf2, 0x31, 0xd3, 0x05, 0xe3, 0xfd, 0x52, 0x94, - 0xd5, 0x59, 0xc0, 0x3a, 0xc4, 0x4c, 0x65, 0x54, 0x41, 0xd6, 0xfb, 0x18, 0x3a, 0x29, 0xe0, 0x6b, - 0xb5, 0x56, 0x5f, 0xc1, 0xc5, 0x52, 0x10, 0xa3, 0x2f, 0x61, 0x33, 0xf7, 0xa8, 0xa2, 0x40, 0xdd, - 0x4c, 0xbd, 0xf7, 0x5a, 0xcd, 0xec, 0x13, 0x18, 0x98, 0x2f, 0x60, 0x43, 0xb8, 0xcc, 0xfe, 0x08, - 0x47, 0xfc, 0x9c, 0x46, 0x84, 0x4f, 0xa1, 0x9b, 0x89, 0xac, 0xf5, 0x99, 0x1e, 0x74, 0x26, 0xe9, - 0xc3, 0xa0, 0x9a, 0x11, 0xb2, 0xb5, 0xb9, 0x07, 0xa8, 0xa8, 0xaf, 0xce, 0xe4, 0x37, 0xcb, 0xcd, - 0xe5, 0x95, 0x6a, 0xda, 0x96, 0xe8, 0xba, 0xb7, 0xdc, 0xfd, 0xd3, 0x32, 0x6c, 0xe4, 0xa9, 0x50, - 0xfc, 0xa5, 0x0e, 0x41, 0x4f, 0x60, 0xfd, 0xbe, 0x7e, 0xaf, 0x4f, 0x27, 0x3f, 0x74, 0xda, 0x53, - 0x4a, 0xef, 0x7a, 0xfd, 0xa6, 0xd2, 0xc8, 0x5c, 0x40, 0x0e, 0x5c, 0xab, 0x32, 0xcc, 0x5f, 0x6d, - 0xbe, 0x7b, 0x0a, 0xe7, 0x0c, 0xeb, 0x75, 0x22, 0x76, 0x1a, 0xe8, 0x4b, 0x58, 0x2d, 0xbf, 0x2d, - 0xa0, 0x92, 0x2f, 0xd4, 0x3e, 0x77, 0xf4, 0xcc, 0xd3, 0x50, 0x32, 0xfd, 0x9f, 0x8b, 0x02, 0x5e, - 0x1a, 0xb4, 0x91, 0x59, 0x6e, 0x2f, 0xea, 0x1e, 0x22, 0x7a, 0xdf, 0x39, 0x15, 0x27, 0xe3, 0xfe, - 0x29, 0x74, 0xd2, 0xc1, 0xb4, 0x6c, 0xe6, 0xca, 0xb8, 0xda, 0x5b, 0x2f, 0xf3, 0x1b, 0xc6, 0xe6, - 0x02, 0xfa, 0x4c, 0x11, 0x8b, 0xc1, 0x65, 0x96, 0xb8, 0x30, 0x8e, 0xf5, 0x2e, 0xd5, 0x8c, 0x40, - 0xe6, 0x02, 0xfa, 0x31, 0xac, 0x88, 0xaf, 0x23, 0xfd, 0x52, 0xbe, 0x69, 0xa9, 0x1f, 0x66, 0xac, - 0xf4, 0x87, 0x19, 0xeb, 0xae, 0xcf, 0xf8, 0xb4, 0x57, 0x33, 0xa3, 0x68, 0x06, 0xcf, 0xe1, 0xe2, - 0x7d, 0xc2, 0xf3, 0x96, 0x02, 0x7d, 0xef, 0x8d, 0x1a, 0xaf, 0x9e, 0x59, 0x45, 0x9b, 0xed, 0x4a, - 0xcc, 0x05, 0xf4, 0xe7, 0x06, 0x5c, 0xba, 0x4f, 0x78, 0xb5, 0x48, 0xa3, 0x0f, 0xeb, 0x85, 0x9c, - 0x50, 0xcc, 0x7b, 0x8f, 0xe7, 0x8d, 0xdb, 0x32, 0x5b, 0x73, 0x01, 0x1d, 0xc9, 0x63, 0xe7, 0xf1, - 0x87, 0xde, 0xad, 0x0d, 0xb4, 0xcc, 0xfc, 0x5b, 0x27, 0x6d, 0xa7, 0x47, 0xfd, 0x7c, 0xef, 0x5f, - 0xaf, 0xb6, 0x1a, 0xff, 0x7e, 0xb5, 0xd5, 0xf8, 0xdf, 0xab, 0xad, 0xc6, 0x2f, 0x6e, 0xbf, 0xe6, - 0xe7, 0xb6, 0xc2, 0x2f, 0x78, 
0x98, 0x51, 0xc7, 0xa3, 0x24, 0xe0, 0xc7, 0x4b, 0xf2, 0xd6, 0x6e, - 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x46, 0x3e, 0x2d, 0xff, 0xe0, 0x1b, 0x00, 0x00, + // 2096 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xdb, 0x6e, 0x1b, 0xc7, + 0xf9, 0xe7, 0x92, 0x94, 0x44, 0x7e, 0x92, 0x25, 0x6a, 0xac, 0xc3, 0x9a, 0x71, 0x04, 0x65, 0xff, + 0x7f, 0x1b, 0xaa, 0x9d, 0x90, 0x90, 0x8c, 0xc4, 0x85, 0x93, 0xa6, 0x50, 0x14, 0x5b, 0x72, 0x6c, + 0xd9, 0xea, 0xda, 0x6d, 0x91, 0xd6, 0x6d, 0x31, 0x5c, 0x0e, 0xc9, 0x09, 0xf7, 0x30, 0xde, 0x9d, + 0x55, 0x20, 0x03, 0xbd, 0x28, 0x5a, 0xf4, 0x11, 0x8a, 0xa2, 0xaf, 0x51, 0x14, 0xbd, 0xec, 0x55, + 0x0f, 0x97, 0x41, 0x5f, 0xa0, 0x85, 0x6f, 0xfa, 0x1a, 0xc5, 0xcc, 0xce, 0x1e, 0xb9, 0x92, 0x9d, + 0x52, 0x56, 0x50, 0xf4, 0xc6, 0xde, 0x99, 0xf9, 0xe6, 0x3b, 0xcd, 0x77, 0xf8, 0xcd, 0x50, 0x70, + 0xdd, 0x27, 0xcc, 0x0b, 0x88, 0x7f, 0x4c, 0xfc, 0xae, 0xfc, 0xa4, 0xdc, 0xf3, 0x4f, 0x32, 0x9f, + 0x1d, 0xe6, 0x7b, 0xdc, 0x43, 0x90, 0xce, 0xb4, 0x1f, 0x0e, 0x29, 0x1f, 0x85, 0xbd, 0x8e, 0xe5, + 0x39, 0x5d, 0xec, 0x0f, 0x3d, 0xe6, 0x7b, 0x5f, 0xc8, 0x8f, 0xf7, 0xac, 0x7e, 0xf7, 0x78, 0xa7, + 0xcb, 0xc6, 0xc3, 0x2e, 0x66, 0x34, 0xe8, 0x62, 0xc6, 0x6c, 0x6a, 0x61, 0x4e, 0x3d, 0xb7, 0x7b, + 0xbc, 0x8d, 0x6d, 0x36, 0xc2, 0xdb, 0xdd, 0x21, 0x71, 0x89, 0x8f, 0x39, 0xe9, 0x47, 0x9c, 0xdb, + 0x6f, 0x0d, 0x3d, 0x6f, 0x68, 0x93, 0xae, 0x1c, 0xf5, 0xc2, 0x41, 0x97, 0x38, 0x8c, 0x2b, 0xb1, + 0xc6, 0x6f, 0x17, 0x60, 0xe9, 0x10, 0xbb, 0x74, 0x40, 0x02, 0x6e, 0x92, 0xe7, 0x21, 0x09, 0x38, + 0x7a, 0x06, 0x75, 0xa1, 0x8c, 0xae, 0x6d, 0x6a, 0x5b, 0xf3, 0x3b, 0x07, 0x9d, 0x54, 0x9b, 0x4e, + 0xac, 0x8d, 0xfc, 0xf8, 0x99, 0xd5, 0xef, 0x1c, 0xef, 0x74, 0xd8, 0x78, 0xd8, 0x11, 0xda, 0x74, + 0x32, 0xda, 0x74, 0x62, 0x6d, 0x3a, 0x66, 0x62, 0x96, 0x29, 0xb9, 0xa2, 0x36, 0x34, 0x7c, 0x72, + 0x4c, 0x03, 0xea, 0xb9, 0x7a, 0x75, 0x53, 0xdb, 0x6a, 0x9a, 0xc9, 0x18, 0xe9, 0x30, 0xe7, 0x7a, + 0x7b, 0xd8, 0x1a, 0x11, 0xbd, 0xb6, 0xa9, 0x6d, 0x35, 0xcc, 0x78, 0x88, 0x36, 0x61, 0x1e, 0x33, + 0xf6, 0x10, 0xf7, 0x88, 0xfd, 0x80, 0x9c, 0xe8, 0x75, 0xb9, 0x31, 0x3b, 0x25, 0xf6, 0x62, 0xc6, + 0x1e, 0x61, 0x87, 0xe8, 0x33, 0x72, 0x35, 0x1e, 0xa2, 0xab, 0xd0, 0x74, 0xb1, 0x43, 0x02, 0x86, + 0x2d, 0xa2, 0x37, 0xe4, 0x5a, 0x3a, 0x81, 0x7e, 0x0e, 0xcb, 0x19, 0xc5, 0x9f, 0x78, 0xa1, 0x6f, + 0x11, 0x1d, 0xa4, 0xe9, 0x8f, 0xa7, 0x33, 0x7d, 0xb7, 0xc8, 0xd6, 0x9c, 0x94, 0x84, 0x7e, 0x0a, + 0x33, 0xf2, 0xe4, 0xf5, 0xf9, 0xcd, 0xda, 0xb9, 0x7a, 0x3b, 0x62, 0x8b, 0x5c, 0x98, 0x63, 0x76, + 0x38, 0xa4, 0x6e, 0xa0, 0x2f, 0x48, 0x09, 0x4f, 0xa7, 0x93, 0xb0, 0xe7, 0xb9, 0x03, 0x3a, 0x3c, + 0xc4, 0x2e, 0x1e, 0x12, 0x87, 0xb8, 0xfc, 0x48, 0x32, 0x37, 0x63, 0x21, 0xe8, 0x05, 0xb4, 0xc6, + 0x61, 0xc0, 0x3d, 0x87, 0xbe, 0x20, 0x8f, 0x99, 0xd8, 0x1b, 0xe8, 0x97, 0xa4, 0x37, 0x1f, 0x4d, + 0x27, 0xf8, 0x41, 0x81, 0xab, 0x39, 0x21, 0x47, 0x04, 0xc9, 0x38, 0xec, 0x91, 0x1f, 0x10, 0x5f, + 0x46, 0xd7, 0x62, 0x14, 0x24, 0x99, 0xa9, 0x28, 0x8c, 0xa8, 0x1a, 0x05, 0xfa, 0xd2, 0x66, 0x2d, + 0x0a, 0xa3, 0x64, 0x0a, 0x6d, 0xc1, 0xd2, 0x31, 0xf1, 0xe9, 0xe0, 0xe4, 0x09, 0x1d, 0xba, 0x98, + 0x87, 0x3e, 0xd1, 0x5b, 0x32, 0x14, 0x8b, 0xd3, 0xc8, 0x81, 0x4b, 0x23, 0x62, 0x3b, 0xc2, 0xe5, + 0x7b, 0x3e, 0xe9, 0x07, 0xfa, 0xb2, 0xf4, 0xef, 0xfe, 0xf4, 0x27, 0x28, 0xd9, 0x99, 0x79, 0xee, + 0x42, 0x31, 0xd7, 0x33, 0x55, 0xa6, 0x44, 0x39, 0x82, 0x22, 0xc5, 0x0a, 0xd3, 0xe8, 0x3a, 0x2c, + 0x72, 0x1f, 0x5b, 0x63, 0xea, 0x0e, 0x0f, 0x09, 0x1f, 0x79, 0x7d, 0xfd, 0xb2, 0xf4, 0x44, 0x61, + 0x16, 0x59, 0x80, 
0x88, 0x8b, 0x7b, 0x36, 0xe9, 0x47, 0xb1, 0xf8, 0xf4, 0x84, 0x91, 0x40, 0x5f, + 0x91, 0x56, 0xdc, 0xea, 0x64, 0x2a, 0x54, 0xa1, 0x40, 0x74, 0xee, 0x4e, 0xec, 0xba, 0xeb, 0x72, + 0xff, 0xc4, 0x2c, 0x61, 0x87, 0xc6, 0x30, 0x2f, 0xec, 0x88, 0x43, 0x61, 0x55, 0x86, 0xc2, 0xfd, + 0xe9, 0x7c, 0x74, 0x90, 0x32, 0x34, 0xb3, 0xdc, 0x51, 0x07, 0xd0, 0x08, 0x07, 0x87, 0xa1, 0xcd, + 0x29, 0xb3, 0x49, 0xa4, 0x46, 0xa0, 0xaf, 0x49, 0x37, 0x95, 0xac, 0xa0, 0x07, 0x00, 0x3e, 0x19, + 0xc4, 0x74, 0xeb, 0xd2, 0xf2, 0x9b, 0x67, 0x59, 0x6e, 0x26, 0xd4, 0x91, 0xc5, 0x99, 0xed, 0xed, + 0xbb, 0xb0, 0x7e, 0x8a, 0x63, 0x50, 0x0b, 0x6a, 0x63, 0x72, 0x22, 0x0b, 0x6a, 0xd3, 0x14, 0x9f, + 0x68, 0x05, 0x66, 0x8e, 0xb1, 0x1d, 0x12, 0x59, 0x02, 0x1b, 0x66, 0x34, 0xb8, 0x53, 0xfd, 0xb6, + 0xd6, 0xfe, 0xb5, 0x06, 0x4b, 0x05, 0x31, 0x25, 0xfb, 0x7f, 0x92, 0xdd, 0x7f, 0x0e, 0x41, 0x37, + 0x78, 0x8a, 0xfd, 0x21, 0xe1, 0x19, 0x45, 0x8c, 0xbf, 0x6b, 0xa0, 0x17, 0xec, 0xff, 0x21, 0xe5, + 0xa3, 0x7b, 0xd4, 0x26, 0x01, 0xba, 0x0d, 0x73, 0x7e, 0x34, 0xa7, 0xda, 0xc4, 0x5b, 0x67, 0xb8, + 0xed, 0xa0, 0x62, 0xc6, 0xd4, 0xe8, 0x63, 0x68, 0x38, 0x84, 0xe3, 0x3e, 0xe6, 0x58, 0xe9, 0xbe, + 0x59, 0xb6, 0x53, 0x48, 0x39, 0x54, 0x74, 0x07, 0x15, 0x33, 0xd9, 0x83, 0xde, 0x87, 0x19, 0x6b, + 0x14, 0xba, 0x63, 0xd9, 0x20, 0xe6, 0x77, 0xde, 0x3e, 0x6d, 0xf3, 0x9e, 0x20, 0x3a, 0xa8, 0x98, + 0x11, 0xf5, 0x27, 0xb3, 0x50, 0x67, 0xd8, 0xe7, 0xc6, 0x3d, 0x58, 0x29, 0x13, 0x21, 0xba, 0x92, + 0x35, 0x22, 0xd6, 0x38, 0x08, 0x1d, 0xe5, 0xe6, 0x64, 0x8c, 0x10, 0xd4, 0x03, 0xfa, 0x22, 0x72, + 0x75, 0xcd, 0x94, 0xdf, 0xc6, 0xb7, 0x60, 0x79, 0x42, 0x9a, 0x38, 0xd4, 0x48, 0x37, 0xc1, 0x61, + 0x41, 0x89, 0x36, 0x42, 0x58, 0x7d, 0x2a, 0x7d, 0x91, 0x94, 0xe6, 0x8b, 0xe8, 0xb3, 0xc6, 0x01, + 0xac, 0x15, 0xc5, 0x06, 0xcc, 0x73, 0x03, 0x22, 0xb2, 0x44, 0xd6, 0x32, 0x4a, 0xfa, 0xe9, 0xaa, + 0xd4, 0xa2, 0x61, 0x96, 0xac, 0x18, 0xbf, 0xa8, 0xc2, 0x9a, 0x49, 0x02, 0xcf, 0x3e, 0x26, 0x71, + 0xa1, 0xb9, 0x18, 0xa8, 0xf0, 0x63, 0xa8, 0x61, 0xc6, 0x54, 0x98, 0xdc, 0x3f, 0xb7, 0x66, 0x6c, + 0x0a, 0xae, 0xe8, 0x5d, 0x58, 0xc6, 0x4e, 0x8f, 0x0e, 0x43, 0x2f, 0x0c, 0x62, 0xb3, 0x64, 0x50, + 0x35, 0xcd, 0xc9, 0x05, 0xc3, 0x82, 0xf5, 0x09, 0x17, 0x28, 0x77, 0x66, 0x01, 0x8d, 0x56, 0x00, + 0x34, 0xa5, 0x42, 0xaa, 0xa7, 0x09, 0xf9, 0x8b, 0x06, 0xad, 0x34, 0x75, 0x14, 0xfb, 0xab, 0xd0, + 0x74, 0xd4, 0x5c, 0xa0, 0x6b, 0xb2, 0x61, 0xa5, 0x13, 0x79, 0x6c, 0x53, 0x2d, 0x62, 0x9b, 0x35, + 0x98, 0x8d, 0xa0, 0xa7, 0x32, 0x4c, 0x8d, 0x72, 0x2a, 0xd7, 0x0b, 0x2a, 0x6f, 0x00, 0x04, 0x49, + 0xfd, 0xd2, 0x67, 0xe5, 0x6a, 0x66, 0x06, 0x19, 0xb0, 0x10, 0x75, 0x42, 0x93, 0x04, 0xa1, 0xcd, + 0xf5, 0x39, 0x49, 0x91, 0x9b, 0x33, 0x3c, 0x58, 0x7a, 0x48, 0x85, 0x0d, 0x83, 0xe0, 0x62, 0x82, + 0xfd, 0x03, 0xa8, 0x0b, 0x61, 0xc2, 0xb0, 0x9e, 0x8f, 0x5d, 0x6b, 0x44, 0x62, 0x5f, 0x25, 0x63, + 0x91, 0xc6, 0x1c, 0x0f, 0x03, 0xbd, 0x2a, 0xe7, 0xe5, 0xb7, 0xf1, 0xc7, 0x6a, 0xa4, 0xe9, 0x2e, + 0x63, 0xc1, 0x37, 0x0f, 0x7f, 0xcb, 0x1b, 0x72, 0x6d, 0xb2, 0x21, 0x17, 0x54, 0xfe, 0x3a, 0x0d, + 0xf9, 0x9c, 0xda, 0x94, 0x11, 0xc2, 0xdc, 0x2e, 0x63, 0x42, 0x11, 0xb4, 0x0d, 0x75, 0xcc, 0x58, + 0xe4, 0xf0, 0x42, 0x45, 0x56, 0x24, 0xe2, 0x7f, 0xa5, 0x92, 0x24, 0x6d, 0xdf, 0x86, 0x66, 0x32, + 0xf5, 0x2a, 0xb1, 0xcd, 0xac, 0xd8, 0x4d, 0x80, 0x08, 0x71, 0xde, 0x77, 0x07, 0x9e, 0x38, 0x52, + 0x11, 0xec, 0x6a, 0xab, 0xfc, 0x36, 0xee, 0xc4, 0x14, 0x52, 0xb7, 0x77, 0x61, 0x86, 0x72, 0xe2, + 0xc4, 0xca, 0xad, 0x65, 0x95, 0x4b, 0x19, 0x99, 0x11, 0x91, 0xf1, 0xd7, 0x06, 0x5c, 0x11, 0x27, + 0xf6, 0x44, 0xa6, 0xc9, 0x2e, 0x63, 0x9f, 
0x12, 0x8e, 0xa9, 0x1d, 0x7c, 0x2f, 0x24, 0xfe, 0xc9, + 0x1b, 0x0e, 0x8c, 0x21, 0xcc, 0x46, 0x59, 0xa6, 0xea, 0xdd, 0xb9, 0x5f, 0x3e, 0x14, 0xfb, 0xf4, + 0xc6, 0x51, 0x7b, 0x33, 0x37, 0x8e, 0xb2, 0x1b, 0x40, 0xfd, 0x82, 0x6e, 0x00, 0xa7, 0x5f, 0x02, + 0x33, 0x57, 0xcb, 0xd9, 0xfc, 0xd5, 0xb2, 0x04, 0x58, 0xcf, 0xbd, 0x2e, 0xb0, 0x6e, 0x94, 0x02, + 0x6b, 0xa7, 0x34, 0x8f, 0x9b, 0xd2, 0xdd, 0xdf, 0xc9, 0x46, 0xe0, 0xa9, 0xb1, 0x36, 0x0d, 0xc4, + 0x86, 0x37, 0x0a, 0xb1, 0xbf, 0x9f, 0x83, 0xcc, 0xd1, 0xa5, 0xf5, 0xfd, 0xd7, 0xb3, 0xe9, 0x7f, + 0x09, 0x3c, 0xff, 0x4a, 0x62, 0x26, 0xe6, 0xa5, 0x3e, 0x48, 0x1a, 0xba, 0xe8, 0x43, 0xa2, 0xb5, + 0xaa, 0xa2, 0x25, 0xbe, 0xd1, 0x4d, 0xa8, 0x0b, 0x27, 0x2b, 0x50, 0xbb, 0x9e, 0xf5, 0xa7, 0x38, + 0x89, 0x5d, 0xc6, 0x9e, 0x30, 0x62, 0x99, 0x92, 0x08, 0xdd, 0x81, 0x66, 0x12, 0xf8, 0x2a, 0xb3, + 0xae, 0x66, 0x77, 0x24, 0x79, 0x12, 0x6f, 0x4b, 0xc9, 0xc5, 0xde, 0x3e, 0xf5, 0x89, 0x25, 0x21, + 0xdf, 0xcc, 0xe4, 0xde, 0x4f, 0xe3, 0xc5, 0x64, 0x6f, 0x42, 0x8e, 0xb6, 0x61, 0x36, 0xba, 0xe5, + 0xcb, 0x0c, 0x9a, 0xdf, 0xb9, 0x32, 0x59, 0x4c, 0xe3, 0x5d, 0x8a, 0xd0, 0xf8, 0xb3, 0x06, 0xef, + 0xa4, 0x01, 0x11, 0x67, 0x53, 0x8c, 0xba, 0xbf, 0xf9, 0x8e, 0x7b, 0x1d, 0x16, 0x25, 0xcc, 0x4f, + 0x2f, 0xfb, 0xd1, 0xbb, 0x53, 0x61, 0xd6, 0xf8, 0x83, 0x06, 0xd7, 0x26, 0xed, 0xd8, 0x1b, 0x61, + 0x9f, 0x27, 0xc7, 0x7b, 0x11, 0xb6, 0xc4, 0x0d, 0xaf, 0x9a, 0x36, 0xbc, 0x9c, 0x7d, 0xb5, 0xbc, + 0x7d, 0xc6, 0x9f, 0xaa, 0x30, 0x9f, 0x09, 0xa0, 0xb2, 0x86, 0x29, 0x00, 0x9f, 0x8c, 0x5b, 0x79, + 0xb1, 0x93, 0x4d, 0xa1, 0x69, 0x66, 0x66, 0xd0, 0x18, 0x80, 0x61, 0x1f, 0x3b, 0x84, 0x13, 0x5f, + 0x54, 0x72, 0x91, 0xf1, 0x0f, 0xa6, 0xaf, 0x2e, 0x47, 0x31, 0x4f, 0x33, 0xc3, 0x5e, 0x20, 0x56, + 0x29, 0x3a, 0x50, 0xf5, 0x5b, 0x8d, 0xd0, 0x97, 0xb0, 0x38, 0xa0, 0x36, 0x39, 0x4a, 0x15, 0x99, + 0x95, 0x8a, 0x3c, 0x9e, 0x5e, 0x91, 0x7b, 0x59, 0xbe, 0x66, 0x41, 0x8c, 0x71, 0x03, 0x5a, 0xc5, + 0x7c, 0x12, 0x4a, 0x52, 0x07, 0x0f, 0x13, 0x6f, 0xa9, 0x91, 0x81, 0xa0, 0x55, 0xcc, 0x1f, 0xe3, + 0x1f, 0x55, 0x58, 0x4d, 0xd8, 0xed, 0xba, 0xae, 0x17, 0xba, 0x96, 0x7c, 0x38, 0x2b, 0x3d, 0x8b, + 0x15, 0x98, 0xe1, 0x94, 0xdb, 0x09, 0xf0, 0x91, 0x03, 0xd1, 0xbb, 0xb8, 0xe7, 0xd9, 0x9c, 0x32, + 0x75, 0xc0, 0xf1, 0x30, 0x3a, 0xfb, 0xe7, 0x21, 0xf5, 0x49, 0x5f, 0x56, 0x82, 0x86, 0x99, 0x8c, + 0xc5, 0x9a, 0x40, 0x35, 0x12, 0xc6, 0x47, 0xce, 0x4c, 0xc6, 0x32, 0xee, 0x3d, 0xdb, 0x26, 0x96, + 0x70, 0x47, 0x06, 0xe8, 0x17, 0x66, 0xe5, 0x05, 0x82, 0xfb, 0xd4, 0x1d, 0x2a, 0x98, 0xaf, 0x46, + 0x42, 0x4f, 0xec, 0xfb, 0xf8, 0x44, 0x6f, 0x48, 0x07, 0x44, 0x03, 0xf4, 0x11, 0xd4, 0x1c, 0xcc, + 0x54, 0xa3, 0xbb, 0x91, 0xab, 0x0e, 0x65, 0x1e, 0xe8, 0x1c, 0x62, 0x16, 0x75, 0x02, 0xb1, 0xad, + 0xfd, 0x01, 0x34, 0xe2, 0x89, 0xaf, 0x05, 0x09, 0xbf, 0x80, 0x4b, 0xb9, 0xe2, 0x83, 0x3e, 0x87, + 0xb5, 0x34, 0xa2, 0xb2, 0x02, 0x15, 0x08, 0x7c, 0xe7, 0x95, 0x9a, 0x99, 0xa7, 0x30, 0x30, 0x9e, + 0xc3, 0xb2, 0x08, 0x19, 0x99, 0xf8, 0x17, 0x74, 0xb5, 0xf9, 0x10, 0x9a, 0x89, 0xc8, 0xd2, 0x98, + 0x69, 0x43, 0xe3, 0x38, 0x7e, 0xd0, 0x8c, 0xee, 0x36, 0xc9, 0xd8, 0xd8, 0x05, 0x94, 0xd5, 0x57, + 0x75, 0xa0, 0x9b, 0x79, 0x50, 0xbc, 0x5a, 0x6c, 0x37, 0x92, 0x3c, 0xc6, 0xc4, 0xbf, 0xaf, 0xc2, + 0xd2, 0x3e, 0x95, 0xaf, 0x1c, 0x17, 0x54, 0xe4, 0x6e, 0x40, 0x2b, 0x08, 0x7b, 0x8e, 0xd7, 0x0f, + 0x6d, 0xa2, 0x40, 0x81, 0xea, 0xf4, 0x13, 0xf3, 0x67, 0x15, 0x3f, 0xe1, 0x2c, 0x86, 0xf9, 0x48, + 0xdd, 0x70, 0xe5, 0x37, 0xfa, 0x08, 0xae, 0x3c, 0x22, 0x5f, 0x2a, 0x7b, 0xf6, 0x6d, 0xaf, 0xd7, + 0xa3, 0xee, 0x30, 0x16, 0x32, 0x23, 0x85, 0x9c, 0x4e, 0x50, 0x06, 
0x15, 0x67, 0x4b, 0xa1, 0xa2, + 0xf1, 0x4b, 0x0d, 0x5a, 0xa9, 0xd7, 0x94, 0xdf, 0x6f, 0x47, 0xf9, 0x11, 0x79, 0xfd, 0x5a, 0xd6, + 0xeb, 0x45, 0xd2, 0xff, 0x3c, 0x35, 0x16, 0xb2, 0xa9, 0xf1, 0x2f, 0x0d, 0x56, 0xf7, 0x29, 0x8f, + 0x8b, 0x12, 0xfd, 0x6f, 0x3b, 0xc1, 0x12, 0x7f, 0xd7, 0xcb, 0xfd, 0xdd, 0x81, 0xb5, 0xa2, 0xa1, + 0xca, 0xe9, 0x2b, 0x30, 0x23, 0x4e, 0x3e, 0x7e, 0x0f, 0x88, 0x06, 0x3b, 0x5f, 0x35, 0x61, 0x39, + 0x6d, 0xe8, 0xe2, 0x5f, 0x6a, 0x11, 0xf4, 0x18, 0x5a, 0xfb, 0xea, 0xd7, 0xb3, 0xf8, 0x1d, 0x06, + 0x9d, 0xf5, 0xb0, 0xd9, 0xbe, 0x5a, 0xbe, 0x18, 0x89, 0x36, 0x2a, 0xc8, 0x82, 0x2b, 0x45, 0x86, + 0xe9, 0x1b, 0xea, 0xff, 0x9f, 0xc1, 0x39, 0xa1, 0x7a, 0x95, 0x88, 0x2d, 0x0d, 0x7d, 0x0e, 0x8b, + 0xf9, 0x97, 0x3e, 0x94, 0xab, 0x70, 0xa5, 0x8f, 0x8f, 0x6d, 0xe3, 0x2c, 0x92, 0x44, 0xff, 0x67, + 0x02, 0x4e, 0xe7, 0x9e, 0xbd, 0x90, 0x91, 0x07, 0xfb, 0x65, 0xcf, 0x82, 0xed, 0xff, 0x3b, 0x93, + 0x26, 0xe1, 0xfe, 0x21, 0x34, 0xe2, 0x67, 0xa2, 0xbc, 0x9b, 0x0b, 0x8f, 0x47, 0xed, 0x56, 0x9e, + 0xdf, 0x20, 0x30, 0x2a, 0xe8, 0xe3, 0x68, 0xf3, 0x2e, 0x63, 0x25, 0x9b, 0x33, 0x8f, 0x23, 0xed, + 0xcb, 0x25, 0x0f, 0x12, 0x46, 0x05, 0x7d, 0x17, 0xe6, 0xc5, 0xd7, 0x91, 0xfa, 0xdd, 0x6a, 0xad, + 0x13, 0xfd, 0x4c, 0xda, 0x89, 0x7f, 0x26, 0xed, 0xdc, 0x75, 0x18, 0x3f, 0x69, 0x97, 0xbc, 0x18, + 0x28, 0x06, 0xcf, 0xe0, 0xd2, 0x3e, 0xe1, 0x29, 0xc0, 0x47, 0xd7, 0x5e, 0xeb, 0x1a, 0xd4, 0x36, + 0x8a, 0x64, 0x93, 0x77, 0x04, 0xa3, 0x82, 0x7e, 0xa3, 0xc1, 0xe5, 0x7d, 0xc2, 0x8b, 0x90, 0x19, + 0xbd, 0x57, 0x2e, 0xe4, 0x14, 0x68, 0xdd, 0x7e, 0x34, 0x6d, 0x66, 0xe7, 0xd9, 0x1a, 0x15, 0xf4, + 0x3b, 0x0d, 0xd6, 0x33, 0x8a, 0x65, 0x31, 0x30, 0xda, 0x3e, 0x5b, 0xb9, 0x12, 0xbc, 0xdc, 0xfe, + 0x6c, 0xca, 0x9f, 0x23, 0x33, 0x2c, 0x8d, 0x0a, 0x3a, 0x92, 0x67, 0x92, 0xb6, 0x3c, 0xf4, 0x76, + 0x69, 0x6f, 0x4b, 0xa4, 0x6f, 0x9c, 0xb6, 0x9c, 0x9c, 0xc3, 0x67, 0x30, 0xbf, 0x4f, 0x78, 0x5c, + 0x9f, 0xf3, 0x91, 0x56, 0x68, 0x8b, 0xf9, 0x54, 0x2d, 0x96, 0x74, 0x19, 0x31, 0xcb, 0x11, 0xaf, + 0x4c, 0x9d, 0xca, 0xe7, 0x6a, 0x69, 0xb1, 0xce, 0x47, 0x4c, 0x79, 0x99, 0x33, 0x2a, 0x9f, 0xec, + 0xfe, 0xed, 0xe5, 0x86, 0xf6, 0xd5, 0xcb, 0x0d, 0xed, 0x9f, 0x2f, 0x37, 0xb4, 0x1f, 0xdd, 0x7a, + 0xc5, 0xdf, 0x10, 0x64, 0xfe, 0x2c, 0x01, 0x33, 0x6a, 0xd9, 0x94, 0xb8, 0xbc, 0x37, 0x2b, 0x83, + 0xff, 0xd6, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x9f, 0xd1, 0x75, 0xb5, 0x20, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
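The registrations above introduce the new Git message types (GitFilesRequest/GitFilesResponse, GitDirectoriesRequest/GitDirectoriesResponse) and RepoServerRevisionChartDetailsRequest; the hunks that follow wire up the corresponding RPCs on RepoServerService. As a rough, hedged illustration only (not part of the patch): the sketch below shows how a caller might exercise the two new Git RPCs through the generated client. The import paths, the NewRepoServerServiceClient constructor, the repo-server address, and the glob semantics of the Path field are assumptions that depend on where this generated package is vendored; the field and method names (GitFilesRequest, GitDirectoriesRequest, GetPaths, Map) are taken from the generated code in this diff.

// Illustrative sketch only. Import paths below are assumptions and will differ
// depending on where the generated "repository" package is vendored.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed locations of the generated client and the v1alpha1 types.
	repository "github.com/argoproj/argo-cd/v2/reposerver/apiclient"
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

func main() {
	// Assumed repo-server address; adjust for the actual deployment.
	conn, err := grpc.Dial("argocd-repo-server:8081",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial repo server: %v", err)
	}
	defer conn.Close()

	// NewRepoServerServiceClient is the standard generated constructor (assumed present).
	client := repository.NewRepoServerServiceClient(conn)
	repo := &v1alpha1.Repository{Repo: "https://github.com/example/app-config.git"}
	ctx := context.Background()

	// GetGitDirectories returns the directory paths present at the given revision.
	dirs, err := client.GetGitDirectories(ctx, &repository.GitDirectoriesRequest{
		Repo:     repo,
		Revision: "HEAD",
	})
	if err != nil {
		log.Fatalf("GetGitDirectories: %v", err)
	}
	fmt.Println("directories:", dirs.GetPaths())

	// GetGitFiles returns matching file paths mapped to their raw contents.
	// The pattern semantics of Path are server-side behaviour and assumed here.
	files, err := client.GetGitFiles(ctx, &repository.GitFilesRequest{
		Repo:     repo,
		Revision: "HEAD",
		Path:     "**/values.yaml",
	})
	if err != nil {
		log.Fatalf("GetGitFiles: %v", err)
	}
	for path, content := range files.Map {
		fmt.Printf("%s: %d bytes\n", path, len(content))
	}
}

The remainder of the diff is the usual gogo/protobuf machinery (client stubs, unimplemented-server stubs, handlers, service descriptor entries, and Marshal/Size/Unmarshal methods) generated for these new messages.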
@@ -2010,8 +2351,14 @@ type RepoServerServiceClient interface { GetAppDetails(ctx context.Context, in *RepoServerAppDetailsQuery, opts ...grpc.CallOption) (*RepoAppDetailsResponse, error) // Get the meta-data (author, date, tags, message) for a specific revision of the repo GetRevisionMetadata(ctx context.Context, in *RepoServerRevisionMetadataRequest, opts ...grpc.CallOption) (*v1alpha1.RevisionMetadata, error) + // Get the chart details (author, date, tags, message) for a specific revision of the repo + GetRevisionChartDetails(ctx context.Context, in *RepoServerRevisionChartDetailsRequest, opts ...grpc.CallOption) (*v1alpha1.ChartDetails, error) // GetHelmCharts returns list of helm charts in the specified repository GetHelmCharts(ctx context.Context, in *HelmChartsRequest, opts ...grpc.CallOption) (*HelmChartsResponse, error) + // GetGitFiles returns a set of file paths and their contents for the given repo + GetGitFiles(ctx context.Context, in *GitFilesRequest, opts ...grpc.CallOption) (*GitFilesResponse, error) + // GetGitDirectories returns a set of directory paths for the given repo + GetGitDirectories(ctx context.Context, in *GitDirectoriesRequest, opts ...grpc.CallOption) (*GitDirectoriesResponse, error) } type repoServerServiceClient struct { @@ -2128,6 +2475,15 @@ func (c *repoServerServiceClient) GetRevisionMetadata(ctx context.Context, in *R return out, nil } +func (c *repoServerServiceClient) GetRevisionChartDetails(ctx context.Context, in *RepoServerRevisionChartDetailsRequest, opts ...grpc.CallOption) (*v1alpha1.ChartDetails, error) { + out := new(v1alpha1.ChartDetails) + err := c.cc.Invoke(ctx, "/repository.RepoServerService/GetRevisionChartDetails", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *repoServerServiceClient) GetHelmCharts(ctx context.Context, in *HelmChartsRequest, opts ...grpc.CallOption) (*HelmChartsResponse, error) { out := new(HelmChartsResponse) err := c.cc.Invoke(ctx, "/repository.RepoServerService/GetHelmCharts", in, out, opts...) @@ -2137,6 +2493,24 @@ func (c *repoServerServiceClient) GetHelmCharts(ctx context.Context, in *HelmCha return out, nil } +func (c *repoServerServiceClient) GetGitFiles(ctx context.Context, in *GitFilesRequest, opts ...grpc.CallOption) (*GitFilesResponse, error) { + out := new(GitFilesResponse) + err := c.cc.Invoke(ctx, "/repository.RepoServerService/GetGitFiles", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *repoServerServiceClient) GetGitDirectories(ctx context.Context, in *GitDirectoriesRequest, opts ...grpc.CallOption) (*GitDirectoriesResponse, error) { + out := new(GitDirectoriesResponse) + err := c.cc.Invoke(ctx, "/repository.RepoServerService/GetGitDirectories", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // RepoServerServiceServer is the server API for RepoServerService service. 
type RepoServerServiceServer interface { // GenerateManifest generates manifest for application in specified repo name and revision @@ -2157,8 +2531,14 @@ type RepoServerServiceServer interface { GetAppDetails(context.Context, *RepoServerAppDetailsQuery) (*RepoAppDetailsResponse, error) // Get the meta-data (author, date, tags, message) for a specific revision of the repo GetRevisionMetadata(context.Context, *RepoServerRevisionMetadataRequest) (*v1alpha1.RevisionMetadata, error) + // Get the chart details (author, date, tags, message) for a specific revision of the repo + GetRevisionChartDetails(context.Context, *RepoServerRevisionChartDetailsRequest) (*v1alpha1.ChartDetails, error) // GetHelmCharts returns list of helm charts in the specified repository GetHelmCharts(context.Context, *HelmChartsRequest) (*HelmChartsResponse, error) + // GetGitFiles returns a set of file paths and their contents for the given repo + GetGitFiles(context.Context, *GitFilesRequest) (*GitFilesResponse, error) + // GetGitDirectories returns a set of directory paths for the given repo + GetGitDirectories(context.Context, *GitDirectoriesRequest) (*GitDirectoriesResponse, error) } // UnimplementedRepoServerServiceServer can be embedded to have forward compatible implementations. @@ -2192,9 +2572,18 @@ func (*UnimplementedRepoServerServiceServer) GetAppDetails(ctx context.Context, func (*UnimplementedRepoServerServiceServer) GetRevisionMetadata(ctx context.Context, req *RepoServerRevisionMetadataRequest) (*v1alpha1.RevisionMetadata, error) { return nil, status.Errorf(codes.Unimplemented, "method GetRevisionMetadata not implemented") } +func (*UnimplementedRepoServerServiceServer) GetRevisionChartDetails(ctx context.Context, req *RepoServerRevisionChartDetailsRequest) (*v1alpha1.ChartDetails, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRevisionChartDetails not implemented") +} func (*UnimplementedRepoServerServiceServer) GetHelmCharts(ctx context.Context, req *HelmChartsRequest) (*HelmChartsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetHelmCharts not implemented") } +func (*UnimplementedRepoServerServiceServer) GetGitFiles(ctx context.Context, req *GitFilesRequest) (*GitFilesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGitFiles not implemented") +} +func (*UnimplementedRepoServerServiceServer) GetGitDirectories(ctx context.Context, req *GitDirectoriesRequest) (*GitDirectoriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetGitDirectories not implemented") +} func RegisterRepoServerServiceServer(s *grpc.Server, srv RepoServerServiceServer) { s.RegisterService(&_RepoServerService_serviceDesc, srv) @@ -2370,6 +2759,24 @@ func _RepoServerService_GetRevisionMetadata_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } +func _RepoServerService_GetRevisionChartDetails_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RepoServerRevisionChartDetailsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RepoServerServiceServer).GetRevisionChartDetails(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/repository.RepoServerService/GetRevisionChartDetails", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RepoServerServiceServer).GetRevisionChartDetails(ctx, 
req.(*RepoServerRevisionChartDetailsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _RepoServerService_GetHelmCharts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HelmChartsRequest) if err := dec(in); err != nil { @@ -2388,7 +2795,43 @@ func _RepoServerService_GetHelmCharts_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } -var _RepoServerService_serviceDesc = grpc.ServiceDesc{ +func _RepoServerService_GetGitFiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GitFilesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RepoServerServiceServer).GetGitFiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/repository.RepoServerService/GetGitFiles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RepoServerServiceServer).GetGitFiles(ctx, req.(*GitFilesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RepoServerService_GetGitDirectories_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GitDirectoriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RepoServerServiceServer).GetGitDirectories(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/repository.RepoServerService/GetGitDirectories", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RepoServerServiceServer).GetGitDirectories(ctx, req.(*GitDirectoriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RepoServerService_serviceDesc = grpc.ServiceDesc{ ServiceName: "repository.RepoServerService", HandlerType: (*RepoServerServiceServer)(nil), Methods: []grpc.MethodDesc{ @@ -2424,10 +2867,22 @@ var _RepoServerService_serviceDesc = grpc.ServiceDesc{ MethodName: "GetRevisionMetadata", Handler: _RepoServerService_GetRevisionMetadata_Handler, }, + { + MethodName: "GetRevisionChartDetails", + Handler: _RepoServerService_GetRevisionChartDetails_Handler, + }, { MethodName: "GetHelmCharts", Handler: _RepoServerService_GetHelmCharts_Handler, }, + { + MethodName: "GetGitFiles", + Handler: _RepoServerService_GetGitFiles_Handler, + }, + { + MethodName: "GetGitDirectories", + Handler: _RepoServerService_GetGitDirectories_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3711,6 +4166,59 @@ func (m *RepoServerRevisionMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (i return len(dAtA) - i, nil } +func (m *RepoServerRevisionChartDetailsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RepoServerRevisionChartDetailsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RepoServerRevisionChartDetailsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], 
m.Revision) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Repo != nil { + { + size, err := m.Repo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRepository(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *HelmAppSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4123,139 +4631,372 @@ func (m *HelmChartsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintRepository(dAtA []byte, offset int, v uint64) int { - offset -= sovRepository(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *GitFilesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *ManifestRequest) Size() (n int) { - if m == nil { - return 0 - } + +func (m *GitFilesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitFilesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Repo != nil { - l = m.Repo.Size() - n += 1 + l + sovRepository(uint64(l)) - } - l = len(m.Revision) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) - } - if m.NoCache { - n += 2 - } - l = len(m.AppLabelKey) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) - } - l = len(m.AppName) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) - } - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) - } - if m.ApplicationSource != nil { - l = m.ApplicationSource.Size() - n += 1 + l + sovRepository(uint64(l)) - } - if len(m.Repos) > 0 { - for _, e := range m.Repos { - l = e.Size() - n += 1 + l + sovRepository(uint64(l)) - } - } - if len(m.Plugins) > 0 { - for _, e := range m.Plugins { - l = e.Size() - n += 1 + l + sovRepository(uint64(l)) - } - } - if m.KustomizeOptions != nil { - l = m.KustomizeOptions.Size() - n += 1 + l + sovRepository(uint64(l)) - } - l = len(m.KubeVersion) - if l > 0 { - n += 1 + l + sovRepository(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.ApiVersions) > 0 { - for _, s := range m.ApiVersions { - l = len(s) - n += 1 + l + sovRepository(uint64(l)) + if m.NoRevisionCache { + i-- + if m.NoRevisionCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x30 } - if m.VerifySignature { - n += 3 - } - if len(m.HelmRepoCreds) > 0 { - for _, e := range m.HelmRepoCreds { - l = e.Size() - n += 2 + l + sovRepository(uint64(l)) + if m.NewGitFileGlobbingEnabled { + i-- + if m.NewGitFileGlobbingEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 } - if m.NoRevisionCache { - n += 3 + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x22 } - l = len(m.TrackingMethod) - if l > 0 { - n += 2 + l + sovRepository(uint64(l)) + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Revision))) + i-- + 
dAtA[i] = 0x1a } - if len(m.EnabledSourceTypes) > 0 { - for k, v := range m.EnabledSourceTypes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovRepository(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 2 + sovRepository(uint64(mapEntrySize)) + if m.SubmoduleEnabled { + i-- + if m.SubmoduleEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - if m.HelmOptions != nil { - l = m.HelmOptions.Size() - n += 2 + l + sovRepository(uint64(l)) - } - if m.HasMultipleSources { - n += 3 - } - if len(m.RefSources) > 0 { - for k, v := range m.RefSources { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovRepository(uint64(l)) + if m.Repo != nil { + { + size, err := m.Repo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - mapEntrySize := 1 + len(k) + sovRepository(uint64(len(k))) + l - n += mapEntrySize + 2 + sovRepository(uint64(mapEntrySize)) + i -= size + i = encodeVarintRepository(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return len(dAtA) - i, nil } -func (m *ManifestRequestWithFiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Part != nil { - n += m.Part.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *GitFilesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil +} + +func (m *GitFilesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitFilesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Map) > 0 { + for k := range m.Map { + v := m.Map[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRepository(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRepository(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRepository(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GitDirectoriesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitDirectoriesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitDirectoriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NoRevisionCache { + i-- + if m.NoRevisionCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x1a + } + if m.SubmoduleEnabled { + i-- + if m.SubmoduleEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Repo != nil { + { + size, err := m.Repo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRepository(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GitDirectoriesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitDirectoriesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitDirectoriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintRepository(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintRepository(dAtA []byte, offset int, v uint64) int { + offset -= sovRepository(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ManifestRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repo != nil { + l = m.Repo.Size() + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.NoCache { + n += 2 + } + l = len(m.AppLabelKey) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.AppName) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.ApplicationSource != nil { + l = m.ApplicationSource.Size() + n += 1 + l + sovRepository(uint64(l)) + } + if len(m.Repos) > 0 { + for _, e := range m.Repos { + l = e.Size() + n += 1 + l + sovRepository(uint64(l)) + } + } + if len(m.Plugins) > 0 { + for _, e := range m.Plugins { + l = e.Size() + n += 1 + l + sovRepository(uint64(l)) + } + } + if m.KustomizeOptions != nil { + l = m.KustomizeOptions.Size() + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.KubeVersion) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if len(m.ApiVersions) > 0 { + for _, s := range m.ApiVersions { + l = len(s) + n += 1 + l + sovRepository(uint64(l)) + } + } + if m.VerifySignature { + n += 3 + } + if len(m.HelmRepoCreds) > 0 { + for _, e := range m.HelmRepoCreds { + l = e.Size() + n += 2 + l + sovRepository(uint64(l)) + } + } + if m.NoRevisionCache { + n += 3 + } + l = len(m.TrackingMethod) + if l > 0 { + n += 2 + l + sovRepository(uint64(l)) + } + if len(m.EnabledSourceTypes) > 0 { + for k, v := range m.EnabledSourceTypes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRepository(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 2 + sovRepository(uint64(mapEntrySize)) + } + } + if m.HelmOptions != nil { + l = m.HelmOptions.Size() + n += 2 + l + sovRepository(uint64(l)) + } + if m.HasMultipleSources { + n += 3 + } + if len(m.RefSources) > 0 { + for k, v := range m.RefSources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovRepository(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovRepository(uint64(len(k))) + l + n += mapEntrySize + 2 + sovRepository(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ManifestRequestWithFiles) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Part 
!= nil { + n += m.Part.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n } func (m *ManifestRequestWithFiles_Request) Size() (n int) { @@ -4688,6 +5429,30 @@ func (m *RepoServerRevisionMetadataRequest) Size() (n int) { return n } +func (m *RepoServerRevisionChartDetailsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repo != nil { + l = m.Repo.Size() + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *HelmAppSpec) Size() (n int) { if m == nil { return 0 @@ -4883,6 +5648,107 @@ func (m *HelmChartsResponse) Size() (n int) { return n } +func (m *GitFilesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repo != nil { + l = m.Repo.Size() + n += 1 + l + sovRepository(uint64(l)) + } + if m.SubmoduleEnabled { + n += 2 + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.NewGitFileGlobbingEnabled { + n += 2 + } + if m.NoRevisionCache { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GitFilesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Map) > 0 { + for k, v := range m.Map { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovRepository(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovRepository(uint64(len(k))) + l + n += mapEntrySize + 1 + sovRepository(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GitDirectoriesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Repo != nil { + l = m.Repo.Size() + n += 1 + l + sovRepository(uint64(l)) + } + if m.SubmoduleEnabled { + n += 2 + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovRepository(uint64(l)) + } + if m.NoRevisionCache { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GitDirectoriesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovRepository(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovRepository(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -8473,7 +9339,7 @@ func (m *RepoServerRevisionMetadataRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { +func (m *RepoServerRevisionChartDetailsRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8496,79 +9362,15 @@ func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HelmAppSpec: wiretype end group for non-group") + return fmt.Errorf("proto: RepoServerRevisionChartDetailsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HelmAppSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RepoServerRevisionChartDetailsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRepository - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRepository - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRepository - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFiles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRepository - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRepository - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRepository - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValueFiles = append(m.ValueFiles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8595,14 +9397,16 @@ func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, &v1alpha1.HelmParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Repo == nil { + m.Repo = &v1alpha1.Repository{} + } + if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8630,13 +9434,13 @@ func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileParameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRepository @@ -8646,25 +9450,23 @@ func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthRepository } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthRepository } if postIndex > l { return io.ErrUnexpectedEOF } - m.FileParameters = append(m.FileParameters, &v1alpha1.HelmFileParameter{}) - if err := m.FileParameters[len(m.FileParameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + 
m.Revision = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8688,7 +9490,7 @@ func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *KustomizeAppSpec) Unmarshal(dAtA []byte) error { +func (m *HelmAppSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8711,15 +9513,15 @@ func (m *KustomizeAppSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KustomizeAppSpec: wiretype end group for non-group") + return fmt.Errorf("proto: HelmAppSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KustomizeAppSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HelmAppSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 3: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8747,21 +9549,236 @@ func (m *KustomizeAppSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRepository(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFiles", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthRepository } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ m.ValueFiles = append(m.ValueFiles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, &v1alpha1.HelmParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileParameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileParameters = append(m.FileParameters, &v1alpha1.HelmFileParameter{}) + if err := m.FileParameters[len(m.FileParameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KustomizeAppSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KustomizeAppSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KustomizeAppSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -9616,6 +10633,638 @@ func (m *HelmChartsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *GitFilesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitFilesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitFilesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repo == nil { + m.Repo = &v1alpha1.Repository{} + } + if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SubmoduleEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SubmoduleEnabled = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NewGitFileGlobbingEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NewGitFileGlobbingEnabled = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoRevisionCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoRevisionCache = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitFilesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitFilesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitFilesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Map == nil { + m.Map = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRepository + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRepository + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthRepository + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthRepository + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Map[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitDirectoriesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitDirectoriesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitDirectoriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repo == nil { + m.Repo = &v1alpha1.Repository{} + } + if err := m.Repo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SubmoduleEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SubmoduleEnabled = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoRevisionCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoRevisionCache = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitDirectoriesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitDirectoriesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitDirectoriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRepository(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRepository + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipRepository(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go index c73d7a7def..fdea46cdea 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/cache.go @@ -10,7 +10,7 @@ import ( "crypto/tls" "crypto/x509" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" "github.com/spf13/cobra" "github.com/argoproj/argo-cd/v2/common" @@ -78,7 +78,7 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) sentinelAddresses := make([]string, 0) sentinelMaster := "" redisDB := 0 - redisCACerticate := "" + redisCACertificate := "" redisClientCertificate := "" redisClientKey := "" redisUseTLS := false @@ -95,8 +95,8 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) cmd.Flags().StringVar(&redisClientCertificate, "redis-client-certificate", "", "Path to Redis client certificate (e.g. /etc/certs/redis/client.crt).") cmd.Flags().StringVar(&redisClientKey, "redis-client-key", "", "Path to Redis client key (e.g. /etc/certs/redis/client.crt).") cmd.Flags().BoolVar(&insecureRedis, "redis-insecure-skip-tls-verify", false, "Skip Redis server certificate validation.") - cmd.Flags().StringVar(&redisCACerticate, "redis-ca-certificate", "", "Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.") - cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(RedisCompressionNone)), "Enable compression for data sent to Redis with the required compression algorithm. (possible values: none, gzip)") + cmd.Flags().StringVar(&redisCACertificate, "redis-ca-certificate", "", "Path to Redis server CA certificate (e.g. /etc/certs/redis/ca.crt). If not specified, system trusted CAs will be used for server certificate validation.") + cmd.Flags().StringVar(&compressionStr, "redis-compress", env.StringFromEnv("REDIS_COMPRESSION", string(RedisCompressionGZip)), "Enable compression for data sent to Redis with the required compression algorithm. 
(possible values: gzip, none)") return func() (*Cache, error) { var tlsConfig *tls.Config = nil if redisUseTLS { @@ -110,8 +110,8 @@ func AddCacheFlagsToCmd(cmd *cobra.Command, opts ...func(client *redis.Client)) } if insecureRedis { tlsConfig.InsecureSkipVerify = true - } else if redisCACerticate != "" { - redisCA, err := certutil.ParseTLSCertificatesFromPath(redisCACerticate) + } else if redisCACertificate != "" { + redisCA, err := certutil.ParseTLSCertificatesFromPath(redisCACertificate) if err != nil { return nil, err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go index 658468386e..f483d2cbec 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go @@ -7,12 +7,13 @@ import ( "encoding/json" "fmt" "io" + "net" "time" ioutil "github.com/argoproj/argo-cd/v2/util/io" - rediscache "github.com/go-redis/cache/v8" - "github.com/go-redis/redis/v8" + rediscache "github.com/go-redis/cache/v9" + "github.com/redis/go-redis/v9" ) type RedisCompressionType string @@ -155,32 +156,30 @@ type MetricsRegistry interface { ObserveRedisRequestDuration(duration time.Duration) } -var metricStartTimeKey = struct{}{} - type redisHook struct { registry MetricsRegistry } -func (rh *redisHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) { - return context.WithValue(ctx, metricStartTimeKey, time.Now()), nil +func (rh *redisHook) DialHook(next redis.DialHook) redis.DialHook { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := next(ctx, network, addr) + return conn, err + } } -func (rh *redisHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error { - cmdErr := cmd.Err() - rh.registry.IncRedisRequest(cmdErr != nil && cmdErr != redis.Nil) +func (rh *redisHook) ProcessHook(next redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + startTime := time.Now() - startTime := ctx.Value(metricStartTimeKey).(time.Time) - duration := time.Since(startTime) - rh.registry.ObserveRedisRequestDuration(duration) + err := next(ctx, cmd) + rh.registry.IncRedisRequest(err != nil && err != redis.Nil) + rh.registry.ObserveRedisRequestDuration(time.Since(startTime)) - return nil -} - -func (redisHook) BeforeProcessPipeline(ctx context.Context, _ []redis.Cmder) (context.Context, error) { - return ctx, nil + return err + } } -func (redisHook) AfterProcessPipeline(_ context.Context, _ []redis.Cmder) error { +func (redisHook) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook { return nil } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis_hook.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis_hook.go index c378dd398c..e7cc3f4bcc 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis_hook.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis_hook.go @@ -2,14 +2,13 @@ package cache import ( "context" - "strings" + "errors" + "net" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" log "github.com/sirupsen/logrus" ) -const NoSuchHostErr = "no such host" - type argoRedisHooks struct { reconnectCallback func() } @@ -18,22 +17,25 @@ func NewArgoRedisHook(reconnectCallback func()) *argoRedisHooks { return &argoRedisHooks{reconnectCallback: reconnectCallback} } -func (hook *argoRedisHooks) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) { - 
return ctx, nil -} - -func (hook *argoRedisHooks) AfterProcess(ctx context.Context, cmd redis.Cmder) error { - if cmd.Err() != nil && strings.Contains(cmd.Err().Error(), NoSuchHostErr) { - log.Warnf("Reconnect to redis because error: \"%v\"", cmd.Err()) - hook.reconnectCallback() +func (hook *argoRedisHooks) DialHook(next redis.DialHook) redis.DialHook { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := next(ctx, network, addr) + return conn, err } - return nil } -func (hook *argoRedisHooks) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) { - return ctx, nil +func (hook *argoRedisHooks) ProcessHook(next redis.ProcessHook) redis.ProcessHook { + return func(ctx context.Context, cmd redis.Cmder) error { + var dnsError *net.DNSError + err := next(ctx, cmd) + if err != nil && errors.As(err, &dnsError) { + log.Warnf("Reconnect to redis because error: \"%v\"", err) + hook.reconnectCallback() + } + return err + } } -func (hook *argoRedisHooks) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error { +func (hook *argoRedisHooks) ProcessPipelineHook(next redis.ProcessPipelineHook) redis.ProcessPipelineHook { return nil } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go b/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go index d780df90bc..4f643a2895 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go @@ -6,7 +6,7 @@ import ( "net/http" "os" - "github.com/ghodss/yaml" + "sigs.k8s.io/yaml" ) // UnmarshalReader is used to read manifests from stdin diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go b/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go index dc1549082d..6808f59d2d 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go @@ -1,6 +1,7 @@ package env import ( + "math" "os" "strconv" "strings" @@ -21,20 +22,24 @@ func ParseNumFromEnv(env string, defaultValue, min, max int) int { if str == "" { return defaultValue } - num, err := strconv.Atoi(str) + num, err := strconv.ParseInt(str, 10, 0) if err != nil { log.Warnf("Could not parse '%s' as a number from environment %s", str, env) return defaultValue } - if num < min { + if num > math.MaxInt || num < math.MinInt { + log.Warnf("Value in %s is %d is outside of the min and max %d allowed values. Using default %d", env, num, min, defaultValue) + return defaultValue + } + if int(num) < min { log.Warnf("Value in %s is %d, which is less than minimum %d allowed", env, num, min) return defaultValue } - if num > max { + if int(num) > max { log.Warnf("Value in %s is %d, which is greater than maximum %d allowed", env, num, max) return defaultValue } - return num + return int(num) } // Helper function to parse a int64 from an environment variable. Returns a @@ -119,8 +124,17 @@ func ParseDurationFromEnv(env string, defaultValue, min, max time.Duration) time return dur } -func StringFromEnv(env string, defaultValue string) string { - if str := os.Getenv(env); str != "" { +type StringFromEnvOpts struct { + // AllowEmpty allows the value to be empty as long as the environment variable is set. 
+ AllowEmpty bool +} + +func StringFromEnv(env string, defaultValue string, opts ...StringFromEnvOpts) string { + opt := StringFromEnvOpts{} + for _, o := range opts { + opt.AllowEmpty = opt.AllowEmpty || o.AllowEmpty + } + if str, ok := os.LookupEnv(env); opt.AllowEmpty && ok || str != "" { return str } return defaultValue diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go index c57ec61c9f..6b8587c0b3 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go @@ -17,6 +17,7 @@ import ( "time" argoexec "github.com/argoproj/pkg/exec" + "github.com/bmatcuk/doublestar/v4" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" @@ -66,7 +67,7 @@ type Client interface { Checkout(revision string, submoduleEnabled bool) error LsRefs() (*Refs, error) LsRemote(revision string) (string, error) - LsFiles(path string) ([]string, error) + LsFiles(path string, enableNewGitFileGlobbing bool) ([]string, error) LsLargeFiles() ([]string, error) CommitSHA() (string, error) RevisionMetadata(revision string) (*RevisionMetadata, error) @@ -313,7 +314,7 @@ func (m *nativeGitClient) Init() error { return err } log.Infof("Initializing %s to %s", m.repoURL, m.root) - _, err = executil.Run(exec.Command("rm", "-rf", m.root)) + err = os.RemoveAll(m.root) if err != nil { return fmt.Errorf("unable to clean repo at %s: %v", m.root, err) } @@ -340,9 +341,9 @@ func (m *nativeGitClient) IsLFSEnabled() bool { func (m *nativeGitClient) fetch(revision string) error { var err error if revision != "" { - err = m.runCredentialedCmd("git", "fetch", "origin", revision, "--tags", "--force", "--prune") + err = m.runCredentialedCmd("fetch", "origin", revision, "--tags", "--force", "--prune") } else { - err = m.runCredentialedCmd("git", "fetch", "origin", "--tags", "--force", "--prune") + err = m.runCredentialedCmd("fetch", "origin", "--tags", "--force", "--prune") } return err } @@ -360,7 +361,7 @@ func (m *nativeGitClient) Fetch(revision string) error { if err == nil && m.IsLFSEnabled() { largeFiles, err := m.LsLargeFiles() if err == nil && len(largeFiles) > 0 { - err = m.runCredentialedCmd("git", "lfs", "fetch", "--all") + err = m.runCredentialedCmd("lfs", "fetch", "--all") if err != nil { return err } @@ -371,14 +372,44 @@ func (m *nativeGitClient) Fetch(revision string) error { } // LsFiles lists the local working tree, including only files that are under source control -func (m *nativeGitClient) LsFiles(path string) ([]string, error) { - out, err := m.runCmd("ls-files", "--full-name", "-z", "--", path) - if err != nil { - return nil, err +func (m *nativeGitClient) LsFiles(path string, enableNewGitFileGlobbing bool) ([]string, error) { + if enableNewGitFileGlobbing { + // This is the new way with safer globbing + err := os.Chdir(m.root) + if err != nil { + return nil, err + } + all_files, err := doublestar.FilepathGlob(path) + if err != nil { + return nil, err + } + var files []string + for _, file := range all_files { + link, err := filepath.EvalSymlinks(file) + if err != nil { + return nil, err + } + absPath, err := filepath.Abs(link) + if err != nil { + return nil, err + } + if strings.HasPrefix(absPath, m.root) { + files = append(files, file) + } else { + log.Warnf("Absolute path for %s is outside of repository, removing it", file) + } + } + return files, nil + } else { + // This is the old and default way + out, err := 
m.runCmd("ls-files", "--full-name", "-z", "--", path) + if err != nil { + return nil, err + } + // remove last element, which is blank regardless of whether we're using nullbyte or newline + ss := strings.Split(out, "\000") + return ss[:len(ss)-1], nil + } - // remove last element, which is blank regardless of whether we're using nullbyte or newline - ss := strings.Split(out, "\000") - return ss[:len(ss)-1], nil } // LsLargeFiles lists all files that have references to LFS storage @@ -393,10 +424,10 @@ func (m *nativeGitClient) LsLargeFiles() ([]string, error) { // Submodule embed other repositories into this repository func (m *nativeGitClient) Submodule() error { - if err := m.runCredentialedCmd("git", "submodule", "sync", "--recursive"); err != nil { + if err := m.runCredentialedCmd("submodule", "sync", "--recursive"); err != nil { return err } - if err := m.runCredentialedCmd("git", "submodule", "update", "--init", "--recursive"); err != nil { + if err := m.runCredentialedCmd("submodule", "update", "--init", "--recursive"); err != nil { return err } return nil @@ -430,7 +461,12 @@ func (m *nativeGitClient) Checkout(revision string, submoduleEnabled bool) error } } } - if _, err := m.runCmd("clean", "-fdx"); err != nil { + // NOTE + // The double "f" in the arguments is not a typo: the first "f" tells + // `git clean` to delete untracked files and directories, and the second "f" + // tells it to clean untracked nested Git repositories (for example a + // submodule which has since been removed). + if _, err := m.runCmd("clean", "-ffdx"); err != nil { return err } return nil @@ -649,7 +685,7 @@ func (m *nativeGitClient) runCmd(args ...string) (string, error) { // runCredentialedCmd is a convenience function to run a git command with username/password credentials // nolint:unparam -func (m *nativeGitClient) runCredentialedCmd(command string, args ...string) error { +func (m *nativeGitClient) runCredentialedCmd(args ...string) error { closer, environ, err := m.creds.Environ() if err != nil { return err @@ -664,7 +700,7 @@ func (m *nativeGitClient) runCredentialedCmd(command string, args ...string) err } } - cmd := exec.Command(command, args...) + cmd := exec.Command("git", args...) cmd.Env = append(cmd.Env, environ...) _, err = m.runCmdOutput(cmd, runOpts{}) return err @@ -679,6 +715,8 @@ func (m *nativeGitClient) runCmdOutput(cmd *exec.Cmd, ropts runOpts) (string, er cmd.Env = append(cmd.Env, "HOME=/dev/null") // Skip LFS for most Git operations except when explicitly requested cmd.Env = append(cmd.Env, "GIT_LFS_SKIP_SMUDGE=1") + // Disable Git terminal prompts in case we're running with a tty + cmd.Env = append(cmd.Env, "GIT_TERMINAL_PROMPT=false") // For HTTPS repositories, we need to consider insecure repositories as well // as custom CA bundles from the cert database.
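Aside (not part of the vendored diff): the LsFiles change above switches to doublestar-based globbing when enableNewGitFileGlobbing is set and then drops any match whose symlink-resolved path escapes the repository root. Below is a minimal standalone sketch of that filtering pattern; the function name globInsideRoot, the repo root, and the glob pattern are illustrative only, not taken from the patch.

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"
	"strings"

	"github.com/bmatcuk/doublestar/v4"
)

// globInsideRoot expands a doublestar pattern rooted at root and keeps only
// matches whose symlink-resolved absolute path still lives under root,
// mirroring the filtering done in the new LsFiles globbing branch.
func globInsideRoot(root, pattern string) ([]string, error) {
	matches, err := doublestar.FilepathGlob(filepath.Join(root, pattern))
	if err != nil {
		return nil, err
	}
	var files []string
	for _, file := range matches {
		resolved, err := filepath.EvalSymlinks(file)
		if err != nil {
			return nil, err
		}
		abs, err := filepath.Abs(resolved)
		if err != nil {
			return nil, err
		}
		if strings.HasPrefix(abs, root) {
			files = append(files, file)
		}
	}
	return files, nil
}

func main() {
	// Illustrative values only; the real caller passes the checked-out repo root.
	files, err := globInsideRoot("/tmp/repo", "**/*.yaml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(files)
}
```

As in the patched LsFiles, symlinks are resolved before the prefix check, so a link that points outside the checkout is excluded from the result.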
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go index 323d78398a..93b9556d7c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/grpc.go @@ -10,6 +10,7 @@ import ( "github.com/argoproj/argo-cd/v2/common" "github.com/sirupsen/logrus" + "golang.org/x/net/proxy" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -63,7 +64,7 @@ func BlockingDial(ctx context.Context, network, address string, creds credential dialer := func(ctx context.Context, address string) (net.Conn, error) { - conn, err := (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) + conn, err := proxy.Dial(ctx, network, address) if err != nil { writeResult(err) return nil, err diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go new file mode 100644 index 0000000000..484e2b61dc --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/grpc/trace.go @@ -0,0 +1,33 @@ +package grpc + +import ( + "sync" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "google.golang.org/grpc" +) + +var ( + otelUnaryInterceptor grpc.UnaryClientInterceptor + otelStreamInterceptor grpc.StreamClientInterceptor + interceptorsInitialized = sync.Once{} +) + +// otel interceptors must be created once to avoid memory leak +// see https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 for details +func ensureInitialized() { + interceptorsInitialized.Do(func() { + otelUnaryInterceptor = otelgrpc.UnaryClientInterceptor() + otelStreamInterceptor = otelgrpc.StreamClientInterceptor() + }) +} + +func OTELUnaryClientInterceptor() grpc.UnaryClientInterceptor { + ensureInitialized() + return otelUnaryInterceptor +} + +func OTELStreamClientInterceptor() grpc.StreamClientInterceptor { + ensureInitialized() + return otelStreamInterceptor +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go index 5824f6233f..ecfa6c0bb6 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go @@ -2,12 +2,12 @@ package helm import ( "bytes" + "context" "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" - executil "github.com/argoproj/argo-cd/v2/util/exec" "io" "net/http" "net/url" @@ -18,21 +18,25 @@ import ( "strings" "time" + executil "github.com/argoproj/argo-cd/v2/util/exec" + "github.com/argoproj/pkg/sync" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" + "oras.land/oras-go/v2/registry/remote" + "oras.land/oras-go/v2/registry/remote/auth" - "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/util/cache" argoio "github.com/argoproj/argo-cd/v2/util/io" "github.com/argoproj/argo-cd/v2/util/io/files" "github.com/argoproj/argo-cd/v2/util/proxy" - "github.com/argoproj/argo-cd/v2/util/text" ) var ( globalLock = sync.NewKeyLock() indexLock = sync.NewKeyLock() + + OCINotEnabledErr = errors.New("could not perform the action when oci is not enabled") ) type Creds struct { @@ -132,7 +136,7 @@ func untarChart(tempDir string, cachedChartPath string, manifestMaxExtractedSize if err != nil { return err } - return files.Untgz(tempDir, reader, manifestMaxExtractedSize) + return files.Untgz(tempDir, reader, manifestMaxExtractedSize, false) } func (c *nativeHelmChart) ExtractChart(chart string, 
version string, passCredentials bool, manifestMaxExtractedSize int64, disableManifestMaxExtractedSize bool) (string, argoio.Closer, error) { @@ -190,7 +194,7 @@ func (c *nativeHelmChart) ExtractChart(chart string, version string, passCredent } // 'helm pull' ensures that chart is downloaded into temp directory - _, err = helmCmd.PullOCI(c.repoURL, chart, version, tempDest) + _, err = helmCmd.PullOCI(c.repoURL, chart, version, tempDest, c.creds) if err != nil { return "", nil, err } @@ -299,7 +303,7 @@ func (c *nativeHelmChart) loadRepoIndex() ([]byte, error) { return nil, err } - req, err := http.NewRequest("GET", indexURL, nil) + req, err := http.NewRequest(http.MethodGet, indexURL, nil) if err != nil { return nil, err } @@ -314,8 +318,8 @@ func (c *nativeHelmChart) loadRepoIndex() ([]byte, error) { } tr := &http.Transport{ - Proxy: proxy.GetCallback(c.proxy), - TLSClientConfig: tlsConf, + Proxy: proxy.GetCallback(c.proxy), + TLSClientConfig: tlsConf, DisableKeepAlives: true, } client := http.Client{Transport: tr} @@ -325,7 +329,7 @@ func (c *nativeHelmChart) loadRepoIndex() ([]byte, error) { } defer func() { _ = resp.Body.Close() }() - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return nil, errors.New("failed to get index: " + resp.Status) } return io.ReadAll(resp.Body) @@ -399,131 +403,12 @@ func getIndexURL(rawURL string) (string, error) { return repoURL.String(), nil } -func getTagsListURL(rawURL string, chart string) (string, error) { - repoURL, err := url.Parse(strings.Trim(rawURL, "/")) - if err != nil { - return "", fmt.Errorf("unable to parse repo url: %v", err) - } - tagsPathFormat := "%s/v2/%s/tags/list" - repoURL.Scheme = "https" - repoURL.Path = fmt.Sprintf(tagsPathFormat, repoURL.Path, chart) - repoURL.RawPath = fmt.Sprintf(tagsPathFormat, repoURL.RawPath, url.PathEscape(chart)) - return repoURL.String(), nil -} - -func (c *nativeHelmChart) getTags(chart string) ([]byte, error) { - nextURL, err := getTagsListURL(c.repoURL, chart) - if err != nil { - return nil, fmt.Errorf("failed to get tag list url: %v", err) - } - - allTags := &TagsList{} - var data []byte - for nextURL != "" { - log.Debugf("fetching %s tags from %s", chart, sanitizeLog(text.Trunc(nextURL, 100))) - data, nextURL, err = c.getTagsFromUrl(nextURL) - if err != nil { - return nil, fmt.Errorf("failed tags part: %v", err) - } - - tags := &TagsList{} - err := json.Unmarshal(data, tags) - if err != nil { - return nil, fmt.Errorf("unable to decode json: %v", err) - } - allTags.Tags = append(allTags.Tags, tags.Tags...) 
- } - data, err = json.Marshal(allTags) - if err != nil { - return nil, fmt.Errorf("failed to marshal tag json: %w", err) - } - return data, nil -} - -func getNextUrl(resp *http.Response) (string, error) { - link := resp.Header.Get("Link") - if link == "" { - return "", nil - } - if link[0] != '<' { - return "", fmt.Errorf("invalid next link %q: missing '<'", link) - } - if i := strings.IndexByte(link, '>'); i == -1 { - return "", fmt.Errorf("invalid next link %q: missing '>'", link) - } else { - link = link[1:i] - } - linkURL, err := resp.Request.URL.Parse(link) - if err != nil { - return "", err - } - return linkURL.String(), nil -} - -func sanitizeLog(input string) string { - sanitized := strings.ReplaceAll(input, "\r", "") - sanitized = strings.ReplaceAll(sanitized, "\n", "") - return sanitized -} - -func (c *nativeHelmChart) getTagsFromUrl(tagsURL string) ([]byte, string, error) { - req, err := http.NewRequest("GET", tagsURL, nil) - if err != nil { - return nil, "", fmt.Errorf("failed create request: %v", err) - } - req.Header.Add("Accept", `application/json`) - if c.creds.Username != "" || c.creds.Password != "" { - // only basic supported - req.SetBasicAuth(c.creds.Username, c.creds.Password) - } - - tlsConf, err := newTLSConfig(c.creds) - if err != nil { - return nil, "", fmt.Errorf("failed setup tlsConfig: %v", err) - } - - tr := &http.Transport{ - Proxy: proxy.GetCallback(c.proxy), - TLSClientConfig: tlsConf, - DisableKeepAlives: true, - } - client := http.Client{Transport: tr} - resp, err := client.Do(req) - if err != nil { - return nil, "", fmt.Errorf("request failed: %v", err) - } - defer func() { - if err = resp.Body.Close(); err != nil { - log.WithFields(log.Fields{ - common.SecurityField: common.SecurityMedium, - common.SecurityCWEField: 775, - }).Errorf("error closing response %q: %v", text.Trunc(tagsURL, 100), err) - } - }() - - if resp.StatusCode != 200 { - data, err := io.ReadAll(resp.Body) - var responseExcerpt string - if err != nil { - responseExcerpt = fmt.Sprintf("err: %v", err) - } else { - responseExcerpt = text.Trunc(string(data), 100) - } - return nil, "", fmt.Errorf("invalid response: %s %s", resp.Status, responseExcerpt) - } - data, err := io.ReadAll(resp.Body) - if err != nil { - return nil, "", fmt.Errorf("failed to read body: %v", err) - } - nextUrl, err := getNextUrl(resp) - return data, nextUrl, err -} - func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) { - tagsURL, err := getTagsListURL(c.repoURL, chart) - if err != nil { - return nil, fmt.Errorf("invalid tags url: %v", err) + if !c.enableOci { + return nil, OCINotEnabledErr } + + tagsURL := strings.Replace(fmt.Sprintf("%s/%s", c.repoURL, chart), "https://", "", 1) indexLock.Lock(tagsURL) defer indexLock.Unlock(tagsURL) @@ -534,10 +419,44 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) } } + tags := &TagsList{} if len(data) == 0 { start := time.Now() - var err error - data, err = c.getTags(chart) + repo, err := remote.NewRepository(tagsURL) + if err != nil { + return nil, fmt.Errorf("failed to initialize repository: %v", err) + } + tlsConf, err := newTLSConfig(c.creds) + if err != nil { + return nil, fmt.Errorf("failed setup tlsConfig: %v", err) + } + client := &http.Client{Transport: &http.Transport{ + Proxy: proxy.GetCallback(c.proxy), + TLSClientConfig: tlsConf, + DisableKeepAlives: true, + }} + + repoHost, _, _ := strings.Cut(tagsURL, "/") + repo.Client = &auth.Client{ + Client: client, + Cache: nil, + Credential: 
auth.StaticCredential(repoHost, auth.Credential{ + Username: c.creds.Username, + Password: c.creds.Password, + }), + } + + ctx := context.Background() + err = repo.Tags(ctx, "", func(tagsResult []string) error { + for _, tag := range tagsResult { + // By convention: Change underscore (_) back to plus (+) to get valid SemVer + convertedTag := strings.ReplaceAll(tag, "_", "+") + tags.Tags = append(tags.Tags, convertedTag) + } + + return nil + }) + if err != nil { return nil, fmt.Errorf("failed to get tags: %v", err) } @@ -550,12 +469,11 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) log.Warnf("Failed to store tags list cache for repo: %s: %v", tagsURL, err) } } - } - - tags := &TagsList{} - err = json.Unmarshal(data, tags) - if err != nil { - return nil, fmt.Errorf("failed to decode tags: %v", err) + } else { + err := json.Unmarshal(data, tags) + if err != nil { + return nil, fmt.Errorf("failed to decode tags: %v", err) + } } return tags, nil diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go index 2be56b26d5..e61dfa8e7a 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go @@ -7,6 +7,7 @@ import ( "path" "path/filepath" "regexp" + "strings" log "github.com/sirupsen/logrus" @@ -228,15 +229,18 @@ func (c *Cmd) Fetch(repo, chartName, version, destination string, creds Creds, p return c.run(args...) } -func (c *Cmd) PullOCI(repo string, chart string, version string, destination string) (string, error) { - return c.run( - "pull", - fmt.Sprintf("oci://%s/%s", repo, chart), - "--version", +func (c *Cmd) PullOCI(repo string, chart string, version string, destination string, creds Creds) (string, error) { + args := []string{"pull", fmt.Sprintf("oci://%s/%s", repo, chart), "--version", version, "--destination", - destination, - ) + destination} + if creds.CAPath != "" { + args = append(args, "--ca-file", creds.CAPath) + } + if creds.InsecureSkipVerify && c.insecureSkipVerifySupported { + args = append(args, "--insecure-skip-tls-verify") + } + return c.run(args...) } func (c *Cmd) dependencyBuild() (string, error) { @@ -247,6 +251,10 @@ func (c *Cmd) inspectValues(values string) (string, error) { return c.run(c.showCommand, "values", values) } +func (c *Cmd) InspectChart() (string, error) { + return c.run(c.showCommand, "chart", ".") +} + type TemplateOpts struct { Name string Namespace string @@ -264,6 +272,10 @@ var ( ) func cleanSetParameters(val string) string { + // `{}` equal helm list parameters format, so don't escape `,`. 
+ if strings.HasPrefix(val, `{`) && strings.HasSuffix(val, `}`) { + return val + } return re.ReplaceAllString(val, `$1\,`) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go index e997d9f611..f586691867 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/helm.go @@ -8,14 +8,19 @@ import ( "path/filepath" "strings" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" + "sigs.k8s.io/yaml" "github.com/argoproj/argo-cd/v2/util/config" executil "github.com/argoproj/argo-cd/v2/util/exec" pathutil "github.com/argoproj/argo-cd/v2/util/io/path" ) +const ( + ResourcePolicyAnnotation = "helm.sh/resource-policy" + ResourcePolicyKeep = "keep" +) + type HelmRepository struct { Creds Name string diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go index 6c0a9e589f..656ff774b2 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/tags.go @@ -1,11 +1,11 @@ package helm import ( - "fmt" + "fmt" - log "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" - "github.com/Masterminds/semver/v3" + "github.com/Masterminds/semver/v3" ) type TagsList struct { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go index f665ded3ed..13973f732f 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/tar.go @@ -58,7 +58,7 @@ func Tgz(srcPath string, inclusions []string, exclusions []string, writers ...io // - a full path // - points to an empty directory or // - points to a non existing directory -func Untgz(dstPath string, r io.Reader, maxSize int64) error { +func Untgz(dstPath string, r io.Reader, maxSize int64, preserveFileMode bool) error { if !filepath.IsAbs(dstPath) { return fmt.Errorf("dstPath points to a relative path: %s", dstPath) } @@ -92,7 +92,11 @@ func Untgz(dstPath string, r io.Reader, maxSize int64) error { switch header.Typeflag { case tar.TypeDir: - err := os.MkdirAll(target, 0755) + var mode os.FileMode = 0755 + if preserveFileMode { + mode = os.FileMode(header.Mode) + } + err := os.MkdirAll(target, mode) if err != nil { return fmt.Errorf("error creating nested folders: %w", err) } @@ -113,12 +117,17 @@ func Untgz(dstPath string, r io.Reader, maxSize int64) error { return fmt.Errorf("error creating symlink: %s", err) } case tar.TypeReg: + var mode os.FileMode = 0644 + if preserveFileMode { + mode = os.FileMode(header.Mode) + } + err := os.MkdirAll(filepath.Dir(target), 0755) if err != nil { return fmt.Errorf("error creating nested folders: %w", err) } - f, err := os.Create(target) + f, err := os.OpenFile(target, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) if err != nil { return fmt.Errorf("error creating file %q: %w", target, err) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go b/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go index 0df9a36d81..35bab0314b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go @@ -1,6 +1,7 @@ package kube import ( + "fmt" "regexp" "github.com/argoproj/gitops-engine/pkg/utils/kube" @@ -20,7 +21,10 @@ func IsValidResourceName(name string) bool { // SetAppInstanceLabel the recommended app.kubernetes.io/instance label 
against an unstructured object // Uses the legacy labeling if environment variable is set func SetAppInstanceLabel(target *unstructured.Unstructured, key, val string) error { - labels := target.GetLabels() + labels, _, err := nestedNullableStringMap(target.Object, "metadata", "labels") + if err != nil { + return err + } if labels == nil { labels = make(map[string]string) } @@ -96,7 +100,11 @@ func SetAppInstanceLabel(target *unstructured.Unstructured, key, val string) err // SetAppInstanceAnnotation the recommended app.kubernetes.io/instance annotation against an unstructured object // Uses the legacy labeling if environment variable is set func SetAppInstanceAnnotation(target *unstructured.Unstructured, key, val string) error { - annotations := target.GetAnnotations() + annotations, _, err := nestedNullableStringMap(target.Object, "metadata", "annotations") + if err != nil { + return fmt.Errorf("failed to get annotations from target object %s %s/%s: %w", target.GroupVersionKind().String(), target.GetNamespace(), target.GetName(), err) + } + if annotations == nil { annotations = make(map[string]string) } @@ -106,26 +114,37 @@ func SetAppInstanceAnnotation(target *unstructured.Unstructured, key, val string } // GetAppInstanceAnnotation returns the application instance name from annotation -func GetAppInstanceAnnotation(un *unstructured.Unstructured, key string) string { - if annotations := un.GetAnnotations(); annotations != nil { - return annotations[key] +func GetAppInstanceAnnotation(un *unstructured.Unstructured, key string) (string, error) { + annotations, _, err := nestedNullableStringMap(un.Object, "metadata", "annotations") + if err != nil { + return "", fmt.Errorf("failed to get annotations from target object %s %s/%s: %w", un.GroupVersionKind().String(), un.GetNamespace(), un.GetName(), err) } - return "" + if annotations != nil { + return annotations[key], nil + } + return "", nil } // GetAppInstanceLabel returns the application instance name from labels -func GetAppInstanceLabel(un *unstructured.Unstructured, key string) string { - if labels := un.GetLabels(); labels != nil { - return labels[key] +func GetAppInstanceLabel(un *unstructured.Unstructured, key string) (string, error) { + labels, _, err := nestedNullableStringMap(un.Object, "metadata", "labels") + if err != nil { + return "", err + } + if labels != nil { + return labels[key], nil } - return "" + return "", nil } // RemoveLabel removes label with the specified name -func RemoveLabel(un *unstructured.Unstructured, key string) { - labels := un.GetLabels() +func RemoveLabel(un *unstructured.Unstructured, key string) error { + labels, _, err := nestedNullableStringMap(un.Object, "metadata", "labels") + if err != nil { + return err + } if labels == nil { - return + return nil } for k := range labels { @@ -139,4 +158,19 @@ func RemoveLabel(un *unstructured.Unstructured, key string) { break } } + return nil +} + +// nestedNullableStringMap returns a copy of map[string]string value of a nested field. +// Returns false if value is not found and an error if not one of map[string]interface{} or nil, or contains non-string values in the map. +func nestedNullableStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { + var m map[string]string + val, found, err := unstructured.NestedFieldNoCopy(obj, fields...) + if err != nil { + return nil, found, err + } + if found && val != nil { + return unstructured.NestedStringMap(obj, fields...) 
+ } + return m, found, err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go b/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go index c661cb6f85..1ea6e0fdad 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go @@ -8,7 +8,6 @@ import ( "net/http" "os" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -68,7 +67,7 @@ func PortForward(targetPort int, namespace string, overrides *clientcmd.ConfigOv transport, upgrader, err := spdy.RoundTripperFor(config) if err != nil { - return -1, errors.Wrap(err, "Could not create round tripper") + return -1, fmt.Errorf("could not create round tripper: %w", err) } dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", url) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/security/rbac.go b/vendor/github.com/argoproj/argo-cd/v2/util/security/rbac.go index ebfdde01c3..d80cbbadb3 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/security/rbac.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/security/rbac.go @@ -4,8 +4,8 @@ import ( "fmt" ) -// AppRBACName constructs name of the app for use in RBAC checks. -func AppRBACName(defaultNS string, project string, namespace string, name string) string { +// RBACName constructs name of the app for use in RBAC checks. +func RBACName(defaultNS string, project string, namespace string, name string) string { if defaultNS != "" && namespace != defaultNS && namespace != "" { return fmt.Sprintf("%s/%s/%s", project, namespace, name) } else { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go b/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go index 5f03e516f8..e01e950ca2 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go @@ -18,7 +18,6 @@ import ( "time" timeutil "github.com/argoproj/pkg/time" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" apierr "k8s.io/apimachinery/pkg/api/errors" @@ -30,6 +29,7 @@ import ( "k8s.io/client-go/kubernetes" v1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/yaml" "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" @@ -421,10 +421,10 @@ const ( resourceExclusionsKey = "resource.exclusions" // resourceInclusions is the key to the list of explicitly watched resources resourceInclusionsKey = "resource.inclusions" + // resourceIgnoreResourceUpdatesEnabledKey is the key to a boolean determining whether the resourceIgnoreUpdates feature is enabled + resourceIgnoreResourceUpdatesEnabledKey = "resource.ignoreResourceUpdatesEnabled" // resourceCustomLabelKey is the key to a custom label to show in node info, if present resourceCustomLabelsKey = "resource.customLabels" - // configManagementPluginsKey is the key to the list of config management plugins - configManagementPluginsKey = "configManagementPlugins" // kustomizeBuildOptionsKey is a string of kustomize build parameters kustomizeBuildOptionsKey = "kustomize.buildOptions" // kustomizeVersionKeyPrefix is a kustomize version key prefix @@ -530,6 +530,9 @@ type ArgoCDDiffOptions struct { // If set to true then differences caused by status are ignored. 
IgnoreResourceStatusField IgnoreStatus `json:"ignoreResourceStatusField,omitempty"` + + // If set to true then ignoreDifferences are applied to ignore application refresh on resource updates. + IgnoreDifferencesOnResourceUpdates bool `json:"ignoreDifferencesOnResourceUpdates,omitempty"` } func (e *incompleteSettingsError) Error() string { @@ -745,21 +748,6 @@ func (mgr *SettingsManager) GetServerRBACLogEnforceEnable() (bool, error) { return strconv.ParseBool(argoCDCM.Data[settingsServerRBACLogEnforceEnableKey]) } -func (mgr *SettingsManager) GetConfigManagementPlugins() ([]v1alpha1.ConfigManagementPlugin, error) { - argoCDCM, err := mgr.getConfigMap() - if err != nil { - return nil, err - } - plugins := make([]v1alpha1.ConfigManagementPlugin, 0) - if value, ok := argoCDCM.Data[configManagementPluginsKey]; ok { - err := yaml.Unmarshal([]byte(value), &plugins) - if err != nil { - return nil, err - } - } - return plugins, nil -} - func (mgr *SettingsManager) GetDeepLinks(deeplinkType string) ([]DeepLink, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { @@ -778,7 +766,7 @@ func (mgr *SettingsManager) GetDeepLinks(deeplinkType string) ([]DeepLink, error func (mgr *SettingsManager) GetEnabledSourceTypes() (map[string]bool, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get argo-cd config map: %w", err) } res := map[string]bool{} for sourceType := range sourceTypeToEnableGenerationKey { @@ -794,6 +782,54 @@ func (mgr *SettingsManager) GetEnabledSourceTypes() (map[string]bool, error) { return res, nil } +func (mgr *SettingsManager) GetIgnoreResourceUpdatesOverrides() (map[string]v1alpha1.ResourceOverride, error) { + compareOptions, err := mgr.GetResourceCompareOptions() + if err != nil { + return nil, fmt.Errorf("failed to get compare options: %w", err) + } + + resourceOverrides, err := mgr.GetResourceOverrides() + if err != nil { + return nil, fmt.Errorf("failed to get resource overrides: %w", err) + } + + for k, v := range resourceOverrides { + resourceUpdates := v.IgnoreResourceUpdates + if compareOptions.IgnoreDifferencesOnResourceUpdates { + resourceUpdates.JQPathExpressions = append(resourceUpdates.JQPathExpressions, v.IgnoreDifferences.JQPathExpressions...) + resourceUpdates.JSONPointers = append(resourceUpdates.JSONPointers, v.IgnoreDifferences.JSONPointers...) + resourceUpdates.ManagedFieldsManagers = append(resourceUpdates.ManagedFieldsManagers, v.IgnoreDifferences.ManagedFieldsManagers...) 
+ } + // Set the IgnoreDifferences because these are the overrides used by Normalizers + v.IgnoreDifferences = resourceUpdates + v.IgnoreResourceUpdates = v1alpha1.OverrideIgnoreDiff{} + resourceOverrides[k] = v + } + + if compareOptions.IgnoreDifferencesOnResourceUpdates { + log.Info("Using diffing customizations to ignore resource updates") + } + + addIgnoreDiffItemOverrideToGK(resourceOverrides, "*/*", "/metadata/resourceVersion") + addIgnoreDiffItemOverrideToGK(resourceOverrides, "*/*", "/metadata/generation") + addIgnoreDiffItemOverrideToGK(resourceOverrides, "*/*", "/metadata/managedFields") + + return resourceOverrides, nil +} + +func (mgr *SettingsManager) GetIsIgnoreResourceUpdatesEnabled() (bool, error) { + argoCDCM, err := mgr.getConfigMap() + if err != nil { + return false, err + } + + if argoCDCM.Data[resourceIgnoreResourceUpdatesEnabledKey] == "" { + return false, nil + } + + return strconv.ParseBool(argoCDCM.Data[resourceIgnoreResourceUpdatesEnabledKey]) +} + // GetResourceOverrides loads Resource Overrides from argocd-cm ConfigMap func (mgr *SettingsManager) GetResourceOverrides() (map[string]v1alpha1.ResourceOverride, error) { argoCDCM, err := mgr.getConfigMap() @@ -910,6 +946,13 @@ func (mgr *SettingsManager) appendResourceOverridesFromSplitKeys(cmData map[stri return err } overrideVal.IgnoreDifferences = overrideIgnoreDiff + case "ignoreResourceUpdates": + overrideIgnoreUpdate := v1alpha1.OverrideIgnoreDiff{} + err := yaml.Unmarshal([]byte(v), &overrideIgnoreUpdate) + if err != nil { + return err + } + overrideVal.IgnoreResourceUpdates = overrideIgnoreUpdate case "knownTypeFields": var knownTypeFields []v1alpha1.KnownTypeField err := yaml.Unmarshal([]byte(v), &knownTypeFields) @@ -939,7 +982,7 @@ func convertToOverrideKey(groupKind string) (string, error) { } func GetDefaultDiffOptions() ArgoCDDiffOptions { - return ArgoCDDiffOptions{IgnoreAggregatedRoles: false} + return ArgoCDDiffOptions{IgnoreAggregatedRoles: false, IgnoreDifferencesOnResourceUpdates: false} } // GetResourceCompareOptions loads the resource compare options settings from the ConfigMap @@ -966,7 +1009,7 @@ func (mgr *SettingsManager) GetResourceCompareOptions() (ArgoCDDiffOptions, erro func (mgr *SettingsManager) GetHelmSettings() (*v1alpha1.HelmOptions, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get argo-cd config map: %v", err) } helmOptions := &v1alpha1.HelmOptions{} if value, ok := argoCDCM.Data[helmValuesFileSchemesKey]; ok { @@ -1002,7 +1045,7 @@ func (mgr *SettingsManager) GetKustomizeSettings() (*KustomizeSettings, error) { if strings.HasPrefix(k, kustomizeVersionKeyPrefix) { err = addKustomizeVersion(kustomizeVersionKeyPrefix, k, v, kustomizeVersionsMap) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to add kustomize version from %q: %w", k, err) } } @@ -1010,7 +1053,7 @@ func (mgr *SettingsManager) GetKustomizeSettings() (*KustomizeSettings, error) { if strings.HasPrefix(k, kustomizePathPrefixKey) { err = addKustomizeVersion(kustomizePathPrefixKey, k, v, kustomizeVersionsMap) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to add kustomize version from %q: %w", k, err) } } @@ -1070,7 +1113,7 @@ func (mgr *SettingsManager) GetRepositories() ([]Repository, error) { // Get the config map outside of the lock argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get argo-cd config map: %w", err) } mgr.mutex.Lock() @@ -1080,7 +1123,7 @@ 
func (mgr *SettingsManager) GetRepositories() ([]Repository, error) { if repositoriesStr != "" { err := yaml.Unmarshal([]byte(repositoriesStr), &repositories) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to unmarshal repositories from config map key %q: %w", repositoriesKey, err) } } mgr.reposCache = repositories @@ -1168,8 +1211,12 @@ func (mgr *SettingsManager) GetHelp() (*Help, error) { if !ok { chatText = "Chat now!" } + chatURL, ok := argoCDCM.Data[helpChatURL] + if !ok { + chatText = "" + } return &Help{ - ChatURL: argoCDCM.Data[helpChatURL], + ChatURL: chatURL, ChatText: chatText, BinaryURLs: getDownloadBinaryUrlsFromConfigMap(argoCDCM), }, nil @@ -1676,13 +1723,26 @@ func (a *ArgoCDSettings) oidcConfig() *oidcConfig { if a.OIDCConfigRAW == "" { return nil } - config, err := unmarshalOIDCConfig(a.OIDCConfigRAW) + configMap := map[string]interface{}{} + err := yaml.Unmarshal([]byte(a.OIDCConfigRAW), &configMap) + if err != nil { + log.Warnf("invalid oidc config: %v", err) + return nil + } + + configMap = ReplaceMapSecrets(configMap, a.Secrets) + data, err := yaml.Marshal(configMap) if err != nil { log.Warnf("invalid oidc config: %v", err) return nil } - config.ClientSecret = ReplaceStringSecret(config.ClientSecret, a.Secrets) - config.ClientID = ReplaceStringSecret(config.ClientID, a.Secrets) + + config, err := unmarshalOIDCConfig(string(data)) + if err != nil { + log.Warnf("invalid oidc config: %v", err) + return nil + } + return &config } @@ -1973,6 +2033,42 @@ func (mgr *SettingsManager) InitializeSettings(insecureModeEnabled bool) (*ArgoC return cdSettings, nil } +// ReplaceMapSecrets takes a json object and recursively looks for any secret key references in the +// object and replaces the value with the secret value +func ReplaceMapSecrets(obj map[string]interface{}, secretValues map[string]string) map[string]interface{} { + newObj := make(map[string]interface{}) + for k, v := range obj { + switch val := v.(type) { + case map[string]interface{}: + newObj[k] = ReplaceMapSecrets(val, secretValues) + case []interface{}: + newObj[k] = replaceListSecrets(val, secretValues) + case string: + newObj[k] = ReplaceStringSecret(val, secretValues) + default: + newObj[k] = val + } + } + return newObj +} + +func replaceListSecrets(obj []interface{}, secretValues map[string]string) []interface{} { + newObj := make([]interface{}, len(obj)) + for i, v := range obj { + switch val := v.(type) { + case map[string]interface{}: + newObj[i] = ReplaceMapSecrets(val, secretValues) + case []interface{}: + newObj[i] = replaceListSecrets(val, secretValues) + case string: + newObj[i] = ReplaceStringSecret(val, secretValues) + default: + newObj[i] = val + } + } + return newObj +} + // ReplaceStringSecret checks if given string is a secret key reference ( starts with $ ) and returns corresponding value from provided map func ReplaceStringSecret(val string, secretValues map[string]string) string { if val == "" || !strings.HasPrefix(val, "$") { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/text/text.go b/vendor/github.com/argoproj/argo-cd/v2/util/text/text.go deleted file mode 100644 index e1dc476600..0000000000 --- a/vendor/github.com/argoproj/argo-cd/v2/util/text/text.go +++ /dev/null @@ -1,18 +0,0 @@ -package text - -import ( - "strings" - "unicode/utf8" -) - -// truncates messages to n characters -func Trunc(message string, n int) string { - if utf8.RuneCountInString(message) > n { - return string([]rune(message)[0:n-3]) + "..." 
- } - return message -} - -func SemVer(s string) string { - return strings.ReplaceAll(s, "+", "") -} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml b/vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml new file mode 100644 index 0000000000..db6e504a9a --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/.codecov.yml @@ -0,0 +1,10 @@ +coverage: + status: + project: + default: + threshold: 1% + patch: + default: + target: 70% +ignore: + - globoptions.go diff --git a/vendor/github.com/bmatcuk/doublestar/v4/.gitignore b/vendor/github.com/bmatcuk/doublestar/v4/.gitignore new file mode 100644 index 0000000000..af212ecc28 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/.gitignore @@ -0,0 +1,32 @@ +# vi +*~ +*.swp +*.swo + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# test directory +test/ diff --git a/vendor/github.com/bmatcuk/doublestar/v4/LICENSE b/vendor/github.com/bmatcuk/doublestar/v4/LICENSE new file mode 100644 index 0000000000..309c9d1d11 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Bob Matcuk + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/bmatcuk/doublestar/v4/README.md b/vendor/github.com/bmatcuk/doublestar/v4/README.md new file mode 100644 index 0000000000..be715ff69e --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/README.md @@ -0,0 +1,404 @@ +# doublestar + +Path pattern matching and globbing supporting `doublestar` (`**`) patterns. 
+ +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bmatcuk/doublestar)](https://pkg.go.dev/github.com/bmatcuk/doublestar/v4) +[![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master)](https://github.com/bmatcuk/doublestar/releases) +[![Build Status](https://github.com/bmatcuk/doublestar/actions/workflows/test.yml/badge.svg)](https://github.com/bmatcuk/doublestar/actions) +[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master) +[![Sponsor](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/bmatcuk) + +## About + +#### [Upgrading?](UPGRADING.md) + +**doublestar** is a [golang] implementation of path pattern matching and +globbing with support for "doublestar" (aka globstar: `**`) patterns. + +doublestar patterns match files and directories recursively. For example, if +you had the following directory structure: + +```bash +grandparent +`-- parent + |-- child1 + `-- child2 +``` + +You could find the children with patterns such as: `**/child*`, +`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will +return all files and directories recursively). + +Bash's globstar is doublestar's inspiration and, as such, works similarly. +Note that the doublestar must appear as a path component by itself. A pattern +such as `/path**` is invalid and will be treated the same as `/path*`, but +`/path*/**` should achieve the desired result. Additionally, `/path/**` will +match all directories and files under the path directory, but `/path/**/` will +only match directories. + +v4 is a complete rewrite with a focus on performance. Additionally, +[doublestar] has been updated to use the new [io/fs] package for filesystem +access. As a result, it is only supported by [golang] v1.16+. + +## Installation + +**doublestar** can be installed via `go get`: + +```bash +go get github.com/bmatcuk/doublestar/v4 +``` + +To use it in your code, you must import it: + +```go +import "github.com/bmatcuk/doublestar/v4" +``` + +## Usage + +### ErrBadPattern + +```go +doublestar.ErrBadPattern +``` + +Returned by various functions to report that the pattern is malformed. At the +moment, this value is equal to `path.ErrBadPattern`, but, for portability, this +equivalence should probably not be relied upon. + +### Match + +```go +func Match(pattern, name string) (bool, error) +``` + +Match returns true if `name` matches the file name `pattern` ([see +"patterns"]). `name` and `pattern` are split on forward slash (`/`) characters +and may be relative or absolute. + +Match requires pattern to match all of name, not just a substring. The only +possible returned error is `ErrBadPattern`, when pattern is malformed. + +Note: this is meant as a drop-in replacement for `path.Match()` which always +uses `'/'` as the path separator. If you want to support systems which use a +different path separator (such as Windows), what you want is `PathMatch()`. +Alternatively, you can run `filepath.ToSlash()` on both pattern and name and +then use this function. + +Note: users should _not_ count on the returned error, +`doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. + + +### PathMatch + +```go +func PathMatch(pattern, name string) (bool, error) +``` + +PathMatch returns true if `name` matches the file name `pattern` ([see +"patterns"]). 
The difference between Match and PathMatch is that PathMatch will +automatically use your system's path separator to split `name` and `pattern`. +On systems where the path separator is `'\'`, escaping will be disabled. + +Note: this is meant as a drop-in replacement for `filepath.Match()`. It assumes +that both `pattern` and `name` are using the system's path separator. If you +can't be sure of that, use `filepath.ToSlash()` on both `pattern` and `name`, +and then use the `Match()` function instead. + +### GlobOption + +Options that may be passed to `Glob`, `GlobWalk`, or `FilepathGlob`. Any number +of options may be passed to these functions, and in any order, as the last +argument(s). + +```go +WithFailOnIOErrors() +``` + +If passed, doublestar will abort and return IO errors when encountered. Note +that if the glob pattern references a path that does not exist (such as +`nonexistent/path/*`), this is _not_ considered an IO error: it is considered a +pattern with no matches. + +```go +WithFailOnPatternNotExist() +``` + +If passed, doublestar will abort and return `doublestar.ErrPatternNotExist` if +the pattern references a path that does not exist before any meta characters +such as `nonexistent/path/*`. Note that alts (ie, `{...}`) are expanded before +this check. In other words, a pattern such as `{a,b}/*` may fail if either `a` +or `b` do not exist but `*/{a,b}` will never fail because the star may match +nothing. + +```go +WithFilesOnly() +``` + +If passed, doublestar will only return "files" from `Glob`, `GlobWalk`, or +`FilepathGlob`. In this context, "files" are anything that is not a directory +or a symlink to a directory. + +Note: if combined with the WithNoFollow option, symlinks to directories _will_ +be included in the result since no attempt is made to follow the symlink. + +```go +WithNoFollow() +``` + +If passed, doublestar will not follow symlinks while traversing the filesystem. +However, due to io/fs's _very_ poor support for querying the filesystem about +symlinks, there's a caveat here: if part of the pattern before any meta +characters contains a reference to a symlink, it will be followed. For example, +a pattern such as `path/to/symlink/*` will be followed assuming it is a valid +symlink to a directory. However, from this same example, a pattern such as +`path/to/**` will not traverse the `symlink`, nor would `path/*/symlink/*` + +Note: if combined with the WithFilesOnly option, symlinks to directories _will_ +be included in the result since no attempt is made to follow the symlink. + +### Glob + +```go +func Glob(fsys fs.FS, pattern string, opts ...GlobOption) ([]string, error) +``` + +Glob returns the names of all files matching pattern or nil if there is no +matching file. The syntax of patterns is the same as in `Match()`. The pattern +may describe hierarchical names such as `usr/*/bin/ed`. + +Glob ignores file system errors such as I/O errors reading directories by +default. The only possible returned error is `ErrBadPattern`, reporting that +the pattern is malformed. + +To enable aborting on I/O errors, the `WithFailOnIOErrors` option can be +passed. + +Note: this is meant as a drop-in replacement for `io/fs.Glob()`. Like +`io/fs.Glob()`, this function assumes that your pattern uses `/` as the path +separator even if that's not correct for your OS (like Windows). If you aren't +sure if that's the case, you can use `filepath.ToSlash()` on your pattern +before calling `Glob()`. 
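
As a rough usage sketch of the call described above (not a definitive recipe), the snippet below runs `Glob` over an `os.DirFS` root with the `WithFilesOnly` option; the `/tmp/project` root and the `**/*.yaml` pattern are placeholder assumptions, not paths used anywhere in this patch:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// Root an fs.FS at a hypothetical project directory.
	fsys := os.DirFS("/tmp/project")

	// Find every YAML file anywhere under the root; WithFilesOnly drops
	// directories (and symlinks to directories) from the results.
	matches, err := doublestar.Glob(fsys, "**/*.yaml", doublestar.WithFilesOnly())
	if err != nil {
		// Without WithFailOnIOErrors, the only possible error is ErrBadPattern.
		log.Fatal(err)
	}
	for _, m := range matches {
		fmt.Println(m) // paths are relative to the root and use '/' separators
	}
}
```
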
+ +Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/` +will return no results and no errors. This seems to be a [conscious +decision](https://github.com/golang/go/issues/44092#issuecomment-774132549), +even if counter-intuitive. You can use [SplitPattern] to divide a pattern into +a base path (to initialize an `FS` object) and pattern. + +Note: users should _not_ count on the returned error, +`doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. + +### GlobWalk + +```go +type GlobWalkFunc func(path string, d fs.DirEntry) error + +func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc, opts ...GlobOption) error +``` + +GlobWalk calls the callback function `fn` for every file matching pattern. The +syntax of pattern is the same as in Match() and the behavior is the same as +Glob(), with regard to limitations (such as patterns containing `/./`, `/../`, +or starting with `/`). The pattern may describe hierarchical names such as +usr/*/bin/ed. + +GlobWalk may have a small performance benefit over Glob if you do not need a +slice of matches because it can avoid allocating memory for the matches. +Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for each +match, and lets you quit early by returning a non-nil error from your callback +function. Like `io/fs.WalkDir`, if your callback returns `SkipDir`, GlobWalk +will skip the current directory. This means that if the current path _is_ a +directory, GlobWalk will not recurse into it. If the current path is not a +directory, the rest of the parent directory will be skipped. + +GlobWalk ignores file system errors such as I/O errors reading directories by +default. GlobWalk may return `ErrBadPattern`, reporting that the pattern is +malformed. + +To enable aborting on I/O errors, the `WithFailOnIOErrors` option can be +passed. + +Additionally, if the callback function `fn` returns an error, GlobWalk will +exit immediately and return that error. + +Like Glob(), this function assumes that your pattern uses `/` as the path +separator even if that's not correct for your OS (like Windows). If you aren't +sure if that's the case, you can use filepath.ToSlash() on your pattern before +calling GlobWalk(). + +Note: users should _not_ count on the returned error, +`doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. + +### FilepathGlob + +```go +func FilepathGlob(pattern string, opts ...GlobOption) (matches []string, err error) +``` + +FilepathGlob returns the names of all files matching pattern or nil if there is +no matching file. The syntax of pattern is the same as in Match(). The pattern +may describe hierarchical names such as usr/*/bin/ed. + +FilepathGlob ignores file system errors such as I/O errors reading directories +by default. The only possible returned error is `ErrBadPattern`, reporting that +the pattern is malformed. + +To enable aborting on I/O errors, the `WithFailOnIOErrors` option can be +passed. + +Note: FilepathGlob is a convenience function that is meant as a drop-in +replacement for `path/filepath.Glob()` for users who don't need the +complication of io/fs. Basically, it: + +* Runs `filepath.Clean()` and `ToSlash()` on the pattern +* Runs `SplitPattern()` to get a base path and a pattern to Glob +* Creates an FS object from the base path and `Glob()s` on the pattern +* Joins the base path with all of the matches from `Glob()` + +Returned paths will use the system's path separator, just like +`filepath.Glob()`. 
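
To make the difference between these two entry points concrete, here is a small sketch, again assuming a hypothetical `/tmp/project` tree: `FilepathGlob` returns OS-native paths directly, while `GlobWalk` streams matches to a callback, exposes each `fs.DirEntry`, and honors `SkipDir` as described above:

```go
package main

import (
	"fmt"
	"io/fs"
	"log"
	"os"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// FilepathGlob hides the io/fs plumbing: it cleans the pattern, splits off
	// the base path, and returns matches using the OS path separator.
	matches, err := doublestar.FilepathGlob("/tmp/project/**/*.go")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(matches)

	// GlobWalk avoids building a slice and gives the callback the fs.DirEntry.
	// Per the docs above, returning doublestar.SkipDir for a directory tells
	// GlobWalk not to recurse into it.
	fsys := os.DirFS("/tmp/project")
	err = doublestar.GlobWalk(fsys, "**", func(p string, d fs.DirEntry) error {
		if d.IsDir() && d.Name() == "vendor" {
			return doublestar.SkipDir // don't descend into vendored code
		}
		fmt.Println(p)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
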
+ +Note: the returned error `doublestar.ErrBadPattern` is not equal to +`filepath.ErrBadPattern`. + +### SplitPattern + +```go +func SplitPattern(p string) (base, pattern string) +``` + +SplitPattern is a utility function. Given a pattern, SplitPattern will return +two strings: the first string is everything up to the last slash (`/`) that +appears _before_ any unescaped "meta" characters (ie, `*?[{`). The second +string is everything after that slash. For example, given the pattern: + +``` +../../path/to/meta*/** + ^----------- split here +``` + +SplitPattern returns "../../path/to" and "meta*/**". This is useful for +initializing os.DirFS() to call Glob() because Glob() will silently fail if +your pattern includes `/./` or `/../`. For example: + +```go +base, pattern := SplitPattern("../../path/to/meta*/**") +fsys := os.DirFS(base) +matches, err := Glob(fsys, pattern) +``` + +If SplitPattern cannot find somewhere to split the pattern (for example, +`meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in this +example). + +Of course, it is your responsibility to decide if the returned base path is +"safe" in the context of your application. Perhaps you could use Match() to +validate against a list of approved base directories? + +### ValidatePattern + +```go +func ValidatePattern(s string) bool +``` + +Validate a pattern. Patterns are validated while they run in Match(), +PathMatch(), and Glob(), so, you normally wouldn't need to call this. However, +there are cases where this might be useful: for example, if your program allows +a user to enter a pattern that you'll run at a later time, you might want to +validate it. + +ValidatePattern assumes your pattern uses '/' as the path separator. + +### ValidatePathPattern + +```go +func ValidatePathPattern(s string) bool +``` + +Like ValidatePattern, only uses your OS path separator. In other words, use +ValidatePattern if you would normally use Match() or Glob(). Use +ValidatePathPattern if you would normally use PathMatch(). Keep in mind, Glob() +requires '/' separators, even if your OS uses something else. + +### Patterns + +**doublestar** supports the following special terms in the patterns: + +Special Terms | Meaning +------------- | ------- +`*` | matches any sequence of non-path-separators +`/**/` | matches zero or more directories +`?` | matches any single non-path-separator character +`[class]` | matches any single non-path-separator character against a class of characters ([see "character classes"]) +`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches + +Any character with a special meaning can be escaped with a backslash (`\`). + +A doublestar (`**`) should appear surrounded by path separators such as `/**/`. +A mid-pattern doublestar (`**`) behaves like bash's globstar option: a pattern +such as `path/to/**.txt` would return the same results as `path/to/*.txt`. The +pattern you're looking for is `path/to/**/*.txt`. 
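
A short sketch that exercises a few of the special terms above against the `grandparent` example tree from earlier, and validates a deliberately malformed pattern; the file names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// Each pattern uses one of the special terms from the table above.
	checks := []struct{ pattern, name string }{
		{"**/child*", "grandparent/parent/child1"},              // doublestar + star
		{"grandparent/**/child?", "grandparent/parent/child2"},  // single character
		{"**/{child1,child2}", "grandparent/parent/child2"},     // alternatives
	}
	for _, c := range checks {
		ok, err := doublestar.Match(c.pattern, c.name)
		fmt.Printf("%-24q vs %-30q -> %v (err=%v)\n", c.pattern, c.name, ok, err)
	}

	// User-supplied patterns can be checked up front.
	fmt.Println(doublestar.ValidatePattern("a/[unclosed")) // false: unterminated class
}
```
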
+ +#### Character Classes + +Character classes support the following: + +Class | Meaning +---------- | ------- +`[abc]` | matches any single character within the set +`[a-z]` | matches any single character in the range +`[^class]` | matches any single character which does *not* match the class +`[!class]` | same as `^`: negates the class + +## Performance + +``` +goos: darwin +goarch: amd64 +pkg: github.com/bmatcuk/doublestar/v4 +cpu: Intel(R) Core(TM) i7-4870HQ CPU @ 2.50GHz +BenchmarkMatch-8 285639 3868 ns/op 0 B/op 0 allocs/op +BenchmarkGoMatch-8 286945 3726 ns/op 0 B/op 0 allocs/op +BenchmarkPathMatch-8 320511 3493 ns/op 0 B/op 0 allocs/op +BenchmarkGoPathMatch-8 304236 3434 ns/op 0 B/op 0 allocs/op +BenchmarkGlob-8 466 2501123 ns/op 190225 B/op 2849 allocs/op +BenchmarkGlobWalk-8 476 2536293 ns/op 184017 B/op 2750 allocs/op +BenchmarkGoGlob-8 463 2574836 ns/op 194249 B/op 2929 allocs/op +``` + +These benchmarks (in `doublestar_test.go`) compare Match() to path.Match(), +PathMath() to filepath.Match(), and Glob() + GlobWalk() to io/fs.Glob(). They +only run patterns that the standard go packages can understand as well (so, no +`{alts}` or `**`) for a fair comparison. Of course, alts and doublestars will +be less performant than the other pattern meta characters. + +Alts are essentially like running multiple patterns, the number of which can +get large if your pattern has alts nested inside alts. This affects both +matching (ie, Match()) and globbing (Glob()). + +`**` performance in matching is actually pretty similar to a regular `*`, but +can cause a large number of reads when globbing as it will need to recursively +traverse your filesystem. + +## Sponsors +I started this project in 2014 in my spare time and have been maintaining it +ever since. In that time, it has grown into one of the most popular globbing +libraries in the Go ecosystem. So, if **doublestar** is a useful library in +your project, consider [sponsoring] my work! I'd really appreciate it! + +[![reviewpad](../sponsors/reviewpad.png?raw=true)](https://reviewpad.com/) + +Thanks for sponsoring me! + +## License + +[MIT License](LICENSE) + +[SplitPattern]: #splitpattern +[doublestar]: https://github.com/bmatcuk/doublestar +[golang]: http://golang.org/ +[io/fs]: https://pkg.go.dev/io/fs +[see "character classes"]: #character-classes +[see "patterns"]: #patterns +[sponsoring]: https://github.com/sponsors/bmatcuk diff --git a/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md b/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md new file mode 100644 index 0000000000..25aace3db0 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/UPGRADING.md @@ -0,0 +1,63 @@ +# Upgrading from v3 to v4 + +v4 is a complete rewrite with a focus on performance. Additionally, +[doublestar] has been updated to use the new [io/fs] package for filesystem +access. As a result, it is only supported by [golang] v1.16+. + +`Match()` and `PathMatch()` mostly did not change, besides big performance +improvements. Their API is the same. However, note the following corner cases: + +* In previous versions of [doublestar], `PathMatch()` could accept patterns + that used either platform-specific path separators, or `/`. This was + undocumented and didn't match `filepath.Match()`. In v4, both `pattern` and + `name` must be using appropriate path separators for the platform. You can + use `filepath.FromSlash()` to change `/` to platform-specific separators if + you aren't sure. 
+* In previous versions of [doublestar], a pattern such as `path/to/a/**` would + _not_ match `path/to/a`. In v4, this pattern _will_ match because if `a` was + a directory, `Glob()` would return it. In other words, the following returns + true: `Match("path/to/a/**", "path/to/a")` + +`Glob()` changed from using a [doublestar]-specific filesystem abstraction (the +`OS` interface) to the [io/fs] package. As a result, it now takes a `fs.FS` as +its first argument. This change has a couple ramifications: + +* Like `io/fs.Glob`, `pattern` must use a `/` as path separator, even on + platforms that use something else. You can use `filepath.ToSlash()` on your + patterns if you aren't sure. +* Patterns that contain `/./` or `/../` are invalid. The [io/fs] package + rejects them, returning an IO error. Since `Glob()` ignores IO errors, it'll + end up being silently rejected. You can run `path.Clean()` to ensure they are + removed from the pattern. + +v4 also added a `GlobWalk()` function that is slightly more performant than +`Glob()` if you just need to iterate over the results and don't need a string +slice. You also get `fs.DirEntry` objects for each result, and can quit early +if your callback returns an error. + +# Upgrading from v2 to v3 + +v3 introduced using `!` to negate character classes, in addition to `^`. If any +of your patterns include a character class that starts with an exclamation mark +(ie, `[!...]`), you'll need to update the pattern to escape or move the +exclamation mark. Note that, like the caret (`^`), it only negates the +character class if it is the first character in the character class. + +# Upgrading from v1 to v2 + +The change from v1 to v2 was fairly minor: the return type of the `Open` method +on the `OS` interface was changed from `*os.File` to `File`, a new interface +exported by doublestar. The new `File` interface only defines the functionality +doublestar actually needs (`io.Closer` and `Readdir`), making it easier to use +doublestar with [go-billy], [afero], or something similar. If you were using +this functionality, updating should be as easy as updating `Open's` return +type, since `os.File` already implements `doublestar.File`. + +If you weren't using this functionality, updating should be as easy as changing +your dependencies to point to v2. + +[afero]: https://github.com/spf13/afero +[doublestar]: https://github.com/bmatcuk/doublestar +[go-billy]: https://github.com/src-d/go-billy +[golang]: http://golang.org/ +[io/fs]: https://golang.org/pkg/io/fs/ diff --git a/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go b/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go new file mode 100644 index 0000000000..210fd40ceb --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/doublestar.go @@ -0,0 +1,13 @@ +package doublestar + +import ( + "errors" + "path" +) + +// ErrBadPattern indicates a pattern was malformed. +var ErrBadPattern = path.ErrBadPattern + +// ErrPatternNotExist indicates that the pattern passed to Glob, GlobWalk, or +// FilepathGlob references a path that does not exist. 
+var ErrPatternNotExist = errors.New("pattern does not exist") diff --git a/vendor/github.com/bmatcuk/doublestar/v4/glob.go b/vendor/github.com/bmatcuk/doublestar/v4/glob.go new file mode 100644 index 0000000000..519601b15c --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/glob.go @@ -0,0 +1,473 @@ +package doublestar + +import ( + "errors" + "io/fs" + "path" +) + +// Glob returns the names of all files matching pattern or nil if there is no +// matching file. The syntax of pattern is the same as in Match(). The pattern +// may describe hierarchical names such as usr/*/bin/ed. +// +// Glob ignores file system errors such as I/O errors reading directories by +// default. The only possible returned error is ErrBadPattern, reporting that +// the pattern is malformed. +// +// To enable aborting on I/O errors, the WithFailOnIOErrors option can be +// passed. +// +// Note: this is meant as a drop-in replacement for io/fs.Glob(). Like +// io/fs.Glob(), this function assumes that your pattern uses `/` as the path +// separator even if that's not correct for your OS (like Windows). If you +// aren't sure if that's the case, you can use filepath.ToSlash() on your +// pattern before calling Glob(). +// +// Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/` +// will return no results and no errors. You can use SplitPattern to divide a +// pattern into a base path (to initialize an `FS` object) and pattern. +// +// Note: users should _not_ count on the returned error, +// doublestar.ErrBadPattern, being equal to path.ErrBadPattern. +// +func Glob(fsys fs.FS, pattern string, opts ...GlobOption) ([]string, error) { + if !ValidatePattern(pattern) { + return nil, ErrBadPattern + } + + g := newGlob(opts...) + + if hasMidDoubleStar(pattern) { + // If the pattern has a `**` anywhere but the very end, GlobWalk is more + // performant because it can get away with less allocations. If the pattern + // ends in a `**`, both methods are pretty much the same, but Glob has a + // _very_ slight advantage because of lower function call overhead. + var matches []string + err := g.doGlobWalk(fsys, pattern, true, true, func(p string, d fs.DirEntry) error { + matches = append(matches, p) + return nil + }) + return matches, err + } + return g.doGlob(fsys, pattern, nil, true, true) +} + +// Does the actual globbin' +// - firstSegment is true if we're in the first segment of the pattern, ie, +// the right-most part where we can match files. If it's false, we're +// somewhere in the middle (or at the beginning) and can only match +// directories since there are path segments above us. +// - beforeMeta is true if we're exploring segments before any meta +// characters, ie, in a pattern such as `path/to/file*.txt`, the `path/to/` +// bit does not contain any meta characters. +func (g *glob) doGlob(fsys fs.FS, pattern string, m []string, firstSegment, beforeMeta bool) (matches []string, err error) { + matches = m + patternStart := indexMeta(pattern) + if patternStart == -1 { + // pattern doesn't contain any meta characters - does a file matching the + // pattern exist? + // The pattern may contain escaped wildcard characters for an exact path match. + path := unescapeMeta(pattern) + pathInfo, pathExists, pathErr := g.exists(fsys, path, beforeMeta) + if pathErr != nil { + return nil, pathErr + } + + if pathExists && (!firstSegment || !g.filesOnly || !pathInfo.IsDir()) { + matches = append(matches, path) + } + + return + } + + dir := "." 
+ splitIdx := lastIndexSlashOrAlt(pattern) + if splitIdx != -1 { + if pattern[splitIdx] == '}' { + openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx]) + if openingIdx == -1 { + // if there's no matching opening index, technically Match() will treat + // an unmatched `}` as nothing special, so... we will, too! + splitIdx = lastIndexSlash(pattern[:splitIdx]) + if splitIdx != -1 { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } else { + // otherwise, we have to handle the alts: + return g.globAlts(fsys, pattern, openingIdx, splitIdx, matches, firstSegment, beforeMeta) + } + } else { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } + + // if `splitIdx` is less than `patternStart`, we know `dir` has no meta + // characters. They would be equal if they are both -1, which means `dir` + // will be ".", and we know that doesn't have meta characters either. + if splitIdx <= patternStart { + return g.globDir(fsys, dir, pattern, matches, firstSegment, beforeMeta) + } + + var dirs []string + dirs, err = g.doGlob(fsys, dir, matches, false, beforeMeta) + if err != nil { + return + } + for _, d := range dirs { + matches, err = g.globDir(fsys, d, pattern, matches, firstSegment, false) + if err != nil { + return + } + } + + return +} + +// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the +// indexes of `{` and `}`, respectively +func (g *glob) globAlts(fsys fs.FS, pattern string, openingIdx, closingIdx int, m []string, firstSegment, beforeMeta bool) (matches []string, err error) { + matches = m + + var dirs []string + startIdx := 0 + afterIdx := closingIdx + 1 + splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx]) + if splitIdx == -1 || pattern[splitIdx] == '}' { + // no common prefix + dirs = []string{""} + } else { + // our alts have a common prefix that we can process first + dirs, err = g.doGlob(fsys, pattern[:splitIdx], matches, false, beforeMeta) + if err != nil { + return + } + + startIdx = splitIdx + 1 + } + + for _, d := range dirs { + patIdx := openingIdx + 1 + altResultsStartIdx := len(matches) + thisResultStartIdx := altResultsStartIdx + for patIdx < closingIdx { + nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true) + if nextIdx == -1 { + nextIdx = closingIdx + } else { + nextIdx += patIdx + } + + alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx) + matches, err = g.doGlob(fsys, alt, matches, firstSegment, beforeMeta) + if err != nil { + return + } + + matchesLen := len(matches) + if altResultsStartIdx != thisResultStartIdx && thisResultStartIdx != matchesLen { + // Alts can result in matches that aren't sorted, or, worse, duplicates + // (consider the trivial pattern `path/to/{a,*}`). Since doGlob returns + // sorted results, we can do a sort of in-place merge and remove + // duplicates. 
But, we only need to do this if this isn't the first alt + // (ie, `altResultsStartIdx != thisResultsStartIdx`) and if the latest + // alt actually added some matches (`thisResultStartIdx != + // len(matches)`) + matches = sortAndRemoveDups(matches, altResultsStartIdx, thisResultStartIdx, matchesLen) + + // length of matches may have changed + thisResultStartIdx = len(matches) + } else { + thisResultStartIdx = matchesLen + } + + patIdx = nextIdx + 1 + } + } + + return +} + +// find files/subdirectories in the given `dir` that match `pattern` +func (g *glob) globDir(fsys fs.FS, dir, pattern string, matches []string, canMatchFiles, beforeMeta bool) (m []string, e error) { + m = matches + + if pattern == "" { + if !canMatchFiles || !g.filesOnly { + // pattern can be an empty string if the original pattern ended in a + // slash, in which case, we should just return dir, but only if it + // actually exists and it's a directory (or a symlink to a directory) + _, isDir, err := g.isPathDir(fsys, dir, beforeMeta) + if err != nil { + return nil, err + } + if isDir { + m = append(m, dir) + } + } + return + } + + if pattern == "**" { + return g.globDoubleStar(fsys, dir, m, canMatchFiles, beforeMeta) + } + + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + e = g.handlePatternNotExist(beforeMeta) + } else { + e = g.forwardErrIfFailOnIOErrors(err) + } + return + } + + var matched bool + for _, info := range dirs { + name := info.Name() + matched, e = matchWithSeparator(pattern, name, '/', false) + if e != nil { + return + } + if matched { + matched = canMatchFiles + if !matched || g.filesOnly { + matched, e = g.isDir(fsys, dir, name, info) + if e != nil { + return + } + if canMatchFiles { + // if we're here, it's because g.filesOnly + // is set and we don't want directories + matched = !matched + } + } + if matched { + m = append(m, path.Join(dir, name)) + } + } + } + + return +} + +func (g *glob) globDoubleStar(fsys fs.FS, dir string, matches []string, canMatchFiles, beforeMeta bool) ([]string, error) { + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return matches, g.handlePatternNotExist(beforeMeta) + } else { + return matches, g.forwardErrIfFailOnIOErrors(err) + } + } + + if !g.filesOnly { + // `**` can match *this* dir, so add it + matches = append(matches, dir) + } + + for _, info := range dirs { + name := info.Name() + isDir, err := g.isDir(fsys, dir, name, info) + if err != nil { + return nil, err + } + if isDir { + matches, err = g.globDoubleStar(fsys, path.Join(dir, name), matches, canMatchFiles, false) + if err != nil { + return nil, err + } + } else if canMatchFiles { + matches = append(matches, path.Join(dir, name)) + } + } + + return matches, nil +} + +// Returns true if the pattern has a doublestar in the middle of the pattern. +// In this case, GlobWalk is faster because it can get away with less +// allocations. However, Glob has a _very_ slight edge if the pattern ends in +// `**`. +func hasMidDoubleStar(p string) bool { + // subtract 3: 2 because we want to return false if the pattern ends in `**` + // (Glob is _very_ slightly faster in that case), and the extra 1 because our + // loop checks p[i] and p[i+1]. + l := len(p) - 3 + for i := 0; i < l; i++ { + if p[i] == '\\' { + // escape next byte + i++ + } else if p[i] == '*' && p[i+1] == '*' { + return true + } + } + return false +} + +// Returns the index of the first unescaped meta character, or negative 1. 
+func indexMeta(s string) int { + var c byte + l := len(s) + for i := 0; i < l; i++ { + c = s[i] + if c == '*' || c == '?' || c == '[' || c == '{' { + return i + } else if c == '\\' { + // skip next byte + i++ + } + } + return -1 +} + +// Returns the index of the last unescaped slash or closing alt (`}`) in the +// string, or negative 1. +func lastIndexSlashOrAlt(s string) int { + for i := len(s) - 1; i >= 0; i-- { + if (s[i] == '/' || s[i] == '}') && (i == 0 || s[i-1] != '\\') { + return i + } + } + return -1 +} + +// Returns the index of the last unescaped slash in the string, or negative 1. +func lastIndexSlash(s string) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == '/' && (i == 0 || s[i-1] != '\\') { + return i + } + } + return -1 +} + +// Assuming the byte after the end of `s` is a closing `}`, this function will +// find the index of the matching `{`. That is, it'll skip over any nested `{}` +// and account for escaping. +func indexMatchedOpeningAlt(s string) int { + alts := 1 + for i := len(s) - 1; i >= 0; i-- { + if s[i] == '}' && (i == 0 || s[i-1] != '\\') { + alts++ + } else if s[i] == '{' && (i == 0 || s[i-1] != '\\') { + if alts--; alts == 0 { + return i + } + } + } + return -1 +} + +// Returns true if the path exists +func (g *glob) exists(fsys fs.FS, name string, beforeMeta bool) (fs.FileInfo, bool, error) { + // name might end in a slash, but Stat doesn't like that + namelen := len(name) + if namelen > 1 && name[namelen-1] == '/' { + name = name[:namelen-1] + } + + info, err := fs.Stat(fsys, name) + if errors.Is(err, fs.ErrNotExist) { + return nil, false, g.handlePatternNotExist(beforeMeta) + } + return info, err == nil, g.forwardErrIfFailOnIOErrors(err) +} + +// Returns true if the path exists and is a directory or a symlink to a +// directory +func (g *glob) isPathDir(fsys fs.FS, name string, beforeMeta bool) (fs.FileInfo, bool, error) { + info, err := fs.Stat(fsys, name) + if errors.Is(err, fs.ErrNotExist) { + return nil, false, g.handlePatternNotExist(beforeMeta) + } + return info, err == nil && info.IsDir(), g.forwardErrIfFailOnIOErrors(err) +} + +// Returns whether or not the given DirEntry is a directory. If the DirEntry +// represents a symbolic link, the link is followed by running fs.Stat() on +// `path.Join(dir, name)` (if dir is "", name will be used without joining) +func (g *glob) isDir(fsys fs.FS, dir, name string, info fs.DirEntry) (bool, error) { + if !g.noFollow && (info.Type()&fs.ModeSymlink) > 0 { + p := name + if dir != "" { + p = path.Join(dir, name) + } + finfo, err := fs.Stat(fsys, p) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // this function is only ever called while expanding a glob, so it can + // never return ErrPatternNotExist + return false, nil + } + return false, g.forwardErrIfFailOnIOErrors(err) + } + return finfo.IsDir(), nil + } + return info.IsDir(), nil +} + +// Builds a string from an alt +func buildAlt(prefix, pattern string, startIdx, openingIdx, currentIdx, nextIdx, afterIdx int) string { + // pattern: + // ignored/start{alts,go,here}remaining - len = 36 + // | | | | ^--- afterIdx = 27 + // | | | \--------- nextIdx = 21 + // | | \----------- currentIdx = 19 + // | \----------------- openingIdx = 13 + // \---------------------- startIdx = 8 + // + // result: + // prefix/startgoremaining - len = 7 + 5 + 2 + 9 = 23 + var buf []byte + patLen := len(pattern) + size := (openingIdx - startIdx) + (nextIdx - currentIdx) + (patLen - afterIdx) + if prefix != "" && prefix != "." 
{ + buf = make([]byte, 0, size+len(prefix)+1) + buf = append(buf, prefix...) + buf = append(buf, '/') + } else { + buf = make([]byte, 0, size) + } + buf = append(buf, pattern[startIdx:openingIdx]...) + buf = append(buf, pattern[currentIdx:nextIdx]...) + if afterIdx < patLen { + buf = append(buf, pattern[afterIdx:]...) + } + return string(buf) +} + +// Running alts can produce results that are not sorted, and, worse, can cause +// duplicates (consider the trivial pattern `path/to/{a,*}`). Since we know +// each run of doGlob is sorted, we can basically do the "merge" step of a +// merge sort in-place. +func sortAndRemoveDups(matches []string, idx1, idx2, l int) []string { + var tmp string + for ; idx1 < idx2; idx1++ { + if matches[idx1] < matches[idx2] { + // order is correct + continue + } else if matches[idx1] > matches[idx2] { + // need to swap and then re-sort matches above idx2 + tmp = matches[idx1] + matches[idx1] = matches[idx2] + + shft := idx2 + 1 + for ; shft < l && matches[shft] < tmp; shft++ { + matches[shft-1] = matches[shft] + } + matches[shft-1] = tmp + } else { + // duplicate - shift matches above idx2 down one and decrement l + for shft := idx2 + 1; shft < l; shft++ { + matches[shft-1] = matches[shft] + } + if l--; idx2 == l { + // nothing left to do... matches[idx2:] must have been full of dups + break + } + } + } + return matches[:l] +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/globoptions.go b/vendor/github.com/bmatcuk/doublestar/v4/globoptions.go new file mode 100644 index 0000000000..9483c4bb00 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/globoptions.go @@ -0,0 +1,144 @@ +package doublestar + +import "strings" + +// glob is an internal type to store options during globbing. +type glob struct { + failOnIOErrors bool + failOnPatternNotExist bool + filesOnly bool + noFollow bool +} + +// GlobOption represents a setting that can be passed to Glob, GlobWalk, and +// FilepathGlob. +type GlobOption func(*glob) + +// Construct a new glob object with the given options +func newGlob(opts ...GlobOption) *glob { + g := &glob{} + for _, opt := range opts { + opt(g) + } + return g +} + +// WithFailOnIOErrors is an option that can be passed to Glob, GlobWalk, or +// FilepathGlob. If passed, doublestar will abort and return IO errors when +// encountered. Note that if the glob pattern references a path that does not +// exist (such as `nonexistent/path/*`), this is _not_ considered an IO error: +// it is considered a pattern with no matches. +// +func WithFailOnIOErrors() GlobOption { + return func(g *glob) { + g.failOnIOErrors = true + } +} + +// WithFailOnPatternNotExist is an option that can be passed to Glob, GlobWalk, +// or FilepathGlob. If passed, doublestar will abort and return +// ErrPatternNotExist if the pattern references a path that does not exist +// before any meta charcters such as `nonexistent/path/*`. Note that alts (ie, +// `{...}`) are expanded before this check. In other words, a pattern such as +// `{a,b}/*` may fail if either `a` or `b` do not exist but `*/{a,b}` will +// never fail because the star may match nothing. +// +func WithFailOnPatternNotExist() GlobOption { + return func(g *glob) { + g.failOnPatternNotExist = true + } +} + +// WithFilesOnly is an option that can be passed to Glob, GlobWalk, or +// FilepathGlob. If passed, doublestar will only return files that match the +// pattern, not directories. 
+// +// Note: if combined with the WithNoFollow option, symlinks to directories +// _will_ be included in the result since no attempt is made to follow the +// symlink. +// +func WithFilesOnly() GlobOption { + return func(g *glob) { + g.filesOnly = true + } +} + +// WithNoFollow is an option that can be passed to Glob, GlobWalk, or +// FilepathGlob. If passed, doublestar will not follow symlinks while +// traversing the filesystem. However, due to io/fs's _very_ poor support for +// querying the filesystem about symlinks, there's a caveat here: if part of +// the pattern before any meta characters contains a reference to a symlink, it +// will be followed. For example, a pattern such as `path/to/symlink/*` will be +// followed assuming it is a valid symlink to a directory. However, from this +// same example, a pattern such as `path/to/**` will not traverse the +// `symlink`, nor would `path/*/symlink/*` +// +// Note: if combined with the WithFilesOnly option, symlinks to directories +// _will_ be included in the result since no attempt is made to follow the +// symlink. +// +func WithNoFollow() GlobOption { + return func(g *glob) { + g.noFollow = true + } +} + +// forwardErrIfFailOnIOErrors is used to wrap the return values of I/O +// functions. When failOnIOErrors is enabled, it will return err; otherwise, it +// always returns nil. +// +func (g *glob) forwardErrIfFailOnIOErrors(err error) error { + if g.failOnIOErrors { + return err + } + return nil +} + +// handleErrNotExist handles fs.ErrNotExist errors. If +// WithFailOnPatternNotExist has been enabled and canFail is true, this will +// return ErrPatternNotExist. Otherwise, it will return nil. +// +func (g *glob) handlePatternNotExist(canFail bool) error { + if canFail && g.failOnPatternNotExist { + return ErrPatternNotExist + } + return nil +} + +// Format options for debugging/testing purposes +func (g *glob) GoString() string { + var b strings.Builder + b.WriteString("opts: ") + + hasOpts := false + if g.failOnIOErrors { + b.WriteString("WithFailOnIOErrors") + hasOpts = true + } + if g.failOnPatternNotExist { + if hasOpts { + b.WriteString(", ") + } + b.WriteString("WithFailOnPatternNotExist") + hasOpts = true + } + if g.filesOnly { + if hasOpts { + b.WriteString(", ") + } + b.WriteString("WithFilesOnly") + hasOpts = true + } + if g.noFollow { + if hasOpts { + b.WriteString(", ") + } + b.WriteString("WithNoFollow") + hasOpts = true + } + + if !hasOpts { + b.WriteString("nil") + } + return b.String() +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go b/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go new file mode 100644 index 0000000000..84e764f0e2 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/globwalk.go @@ -0,0 +1,414 @@ +package doublestar + +import ( + "errors" + "io/fs" + "path" + "path/filepath" + "strings" +) + +// If returned from GlobWalkFunc, will cause GlobWalk to skip the current +// directory. In other words, if the current path is a directory, GlobWalk will +// not recurse into it. Otherwise, GlobWalk will skip the rest of the current +// directory. +var SkipDir = fs.SkipDir + +// Callback function for GlobWalk(). If the function returns an error, GlobWalk +// will end immediately and return the same error. +type GlobWalkFunc func(path string, d fs.DirEntry) error + +// GlobWalk calls the callback function `fn` for every file matching pattern. 
+// The syntax of pattern is the same as in Match() and the behavior is the same +// as Glob(), with regard to limitations (such as patterns containing `/./`, +// `/../`, or starting with `/`). The pattern may describe hierarchical names +// such as usr/*/bin/ed. +// +// GlobWalk may have a small performance benefit over Glob if you do not need a +// slice of matches because it can avoid allocating memory for the matches. +// Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for +// each match, and lets you quit early by returning a non-nil error from your +// callback function. Like `io/fs.WalkDir`, if your callback returns `SkipDir`, +// GlobWalk will skip the current directory. This means that if the current +// path _is_ a directory, GlobWalk will not recurse into it. If the current +// path is not a directory, the rest of the parent directory will be skipped. +// +// GlobWalk ignores file system errors such as I/O errors reading directories +// by default. GlobWalk may return ErrBadPattern, reporting that the pattern is +// malformed. +// +// To enable aborting on I/O errors, the WithFailOnIOErrors option can be +// passed. +// +// Additionally, if the callback function `fn` returns an error, GlobWalk will +// exit immediately and return that error. +// +// Like Glob(), this function assumes that your pattern uses `/` as the path +// separator even if that's not correct for your OS (like Windows). If you +// aren't sure if that's the case, you can use filepath.ToSlash() on your +// pattern before calling GlobWalk(). +// +// Note: users should _not_ count on the returned error, +// doublestar.ErrBadPattern, being equal to path.ErrBadPattern. +// +func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc, opts ...GlobOption) error { + if !ValidatePattern(pattern) { + return ErrBadPattern + } + + g := newGlob(opts...) + return g.doGlobWalk(fsys, pattern, true, true, fn) +} + +// Actually execute GlobWalk +// - firstSegment is true if we're in the first segment of the pattern, ie, +// the right-most part where we can match files. If it's false, we're +// somewhere in the middle (or at the beginning) and can only match +// directories since there are path segments above us. +// - beforeMeta is true if we're exploring segments before any meta +// characters, ie, in a pattern such as `path/to/file*.txt`, the `path/to/` +// bit does not contain any meta characters. +func (g *glob) doGlobWalk(fsys fs.FS, pattern string, firstSegment, beforeMeta bool, fn GlobWalkFunc) error { + patternStart := indexMeta(pattern) + if patternStart == -1 { + // pattern doesn't contain any meta characters - does a file matching the + // pattern exist? + // The pattern may contain escaped wildcard characters for an exact path match. + path := unescapeMeta(pattern) + info, pathExists, err := g.exists(fsys, path, beforeMeta) + if pathExists && (!firstSegment || !g.filesOnly || !info.IsDir()) { + err = fn(path, dirEntryFromFileInfo(info)) + if err == SkipDir { + err = nil + } + } + return err + } + + dir := "." + splitIdx := lastIndexSlashOrAlt(pattern) + if splitIdx != -1 { + if pattern[splitIdx] == '}' { + openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx]) + if openingIdx == -1 { + // if there's no matching opening index, technically Match() will treat + // an unmatched `}` as nothing special, so... we will, too! 
+ splitIdx = lastIndexSlash(pattern[:splitIdx]) + if splitIdx != -1 { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } else { + // otherwise, we have to handle the alts: + return g.globAltsWalk(fsys, pattern, openingIdx, splitIdx, firstSegment, beforeMeta, fn) + } + } else { + dir = pattern[:splitIdx] + pattern = pattern[splitIdx+1:] + } + } + + // if `splitIdx` is less than `patternStart`, we know `dir` has no meta + // characters. They would be equal if they are both -1, which means `dir` + // will be ".", and we know that doesn't have meta characters either. + if splitIdx <= patternStart { + return g.globDirWalk(fsys, dir, pattern, firstSegment, beforeMeta, fn) + } + + return g.doGlobWalk(fsys, dir, false, beforeMeta, func(p string, d fs.DirEntry) error { + if err := g.globDirWalk(fsys, p, pattern, firstSegment, false, fn); err != nil { + return err + } + return nil + }) +} + +// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the +// indexes of `{` and `}`, respectively +func (g *glob) globAltsWalk(fsys fs.FS, pattern string, openingIdx, closingIdx int, firstSegment, beforeMeta bool, fn GlobWalkFunc) (err error) { + var matches []DirEntryWithFullPath + startIdx := 0 + afterIdx := closingIdx + 1 + splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx]) + if splitIdx == -1 || pattern[splitIdx] == '}' { + // no common prefix + matches, err = g.doGlobAltsWalk(fsys, "", pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, beforeMeta, matches) + if err != nil { + return + } + } else { + // our alts have a common prefix that we can process first + startIdx = splitIdx + 1 + innerBeforeMeta := beforeMeta && !hasMetaExceptAlts(pattern[:splitIdx]) + err = g.doGlobWalk(fsys, pattern[:splitIdx], false, beforeMeta, func(p string, d fs.DirEntry) (e error) { + matches, e = g.doGlobAltsWalk(fsys, p, pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, innerBeforeMeta, matches) + return e + }) + if err != nil { + return + } + } + + skip := "" + for _, m := range matches { + if skip != "" { + // Because matches are sorted, we know that descendants of the skipped + // item must come immediately after the skipped item. If we find an item + // that does not have a prefix matching the skipped item, we know we're + // done skipping. I'm using strings.HasPrefix here because + // filepath.HasPrefix has been marked deprecated (and just calls + // strings.HasPrefix anyway). The reason it's deprecated is because it + // doesn't handle case-insensitive paths, nor does it guarantee that the + // prefix is actually a parent directory. Neither is an issue here: the + // paths come from the system so their cases will match, and we guarantee + // a parent directory by appending a slash to the prefix. + // + // NOTE: m.Path will always use slashes as path separators. 
+ if strings.HasPrefix(m.Path, skip) { + continue + } + skip = "" + } + if err = fn(m.Path, m.Entry); err != nil { + if err == SkipDir { + isDir, err := g.isDir(fsys, "", m.Path, m.Entry) + if err != nil { + return err + } + if isDir { + // append a slash to guarantee `skip` will be treated as a parent dir + skip = m.Path + "/" + } else { + // Dir() calls Clean() which calls FromSlash(), so we need to convert + // back to slashes + skip = filepath.ToSlash(filepath.Dir(m.Path)) + "/" + } + err = nil + continue + } + return + } + } + + return +} + +// runs actual matching for alts +func (g *glob) doGlobAltsWalk(fsys fs.FS, d, pattern string, startIdx, openingIdx, closingIdx, afterIdx int, firstSegment, beforeMeta bool, m []DirEntryWithFullPath) (matches []DirEntryWithFullPath, err error) { + matches = m + matchesLen := len(m) + patIdx := openingIdx + 1 + for patIdx < closingIdx { + nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true) + if nextIdx == -1 { + nextIdx = closingIdx + } else { + nextIdx += patIdx + } + + alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx) + err = g.doGlobWalk(fsys, alt, firstSegment, beforeMeta, func(p string, d fs.DirEntry) error { + // insertion sort, ignoring dups + insertIdx := matchesLen + for insertIdx > 0 && matches[insertIdx-1].Path > p { + insertIdx-- + } + if insertIdx > 0 && matches[insertIdx-1].Path == p { + // dup + return nil + } + + // append to grow the slice, then insert + entry := DirEntryWithFullPath{d, p} + matches = append(matches, entry) + for i := matchesLen; i > insertIdx; i-- { + matches[i] = matches[i-1] + } + matches[insertIdx] = entry + matchesLen++ + + return nil + }) + if err != nil { + return + } + + patIdx = nextIdx + 1 + } + + return +} + +func (g *glob) globDirWalk(fsys fs.FS, dir, pattern string, canMatchFiles, beforeMeta bool, fn GlobWalkFunc) (e error) { + if pattern == "" { + if !canMatchFiles || !g.filesOnly { + // pattern can be an empty string if the original pattern ended in a + // slash, in which case, we should just return dir, but only if it + // actually exists and it's a directory (or a symlink to a directory) + info, isDir, err := g.isPathDir(fsys, dir, beforeMeta) + if err != nil { + return err + } + if isDir { + e = fn(dir, dirEntryFromFileInfo(info)) + if e == SkipDir { + e = nil + } + } + } + return + } + + if pattern == "**" { + // `**` can match *this* dir + info, dirExists, err := g.exists(fsys, dir, beforeMeta) + if err != nil { + return err + } + if !dirExists || !info.IsDir() { + return nil + } + if !canMatchFiles || !g.filesOnly { + if e = fn(dir, dirEntryFromFileInfo(info)); e != nil { + if e == SkipDir { + e = nil + } + return + } + } + return g.globDoubleStarWalk(fsys, dir, canMatchFiles, fn) + } + + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return g.handlePatternNotExist(beforeMeta) + } + return g.forwardErrIfFailOnIOErrors(err) + } + + var matched bool + for _, info := range dirs { + name := info.Name() + matched, e = matchWithSeparator(pattern, name, '/', false) + if e != nil { + return + } + if matched { + matched = canMatchFiles + if !matched || g.filesOnly { + matched, e = g.isDir(fsys, dir, name, info) + if e != nil { + return e + } + if canMatchFiles { + // if we're here, it's because g.filesOnly + // is set and we don't want directories + matched = !matched + } + } + if matched { + if e = fn(path.Join(dir, name), info); e != nil { + if e == SkipDir { + e = nil + } + return + } + } + } + } + + return +} + +// 
recursively walk files/directories in a directory +func (g *glob) globDoubleStarWalk(fsys fs.FS, dir string, canMatchFiles bool, fn GlobWalkFunc) (e error) { + dirs, err := fs.ReadDir(fsys, dir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // This function is only ever called after we know the top-most directory + // exists, so, if we ever get here, we know we'll never return + // ErrPatternNotExist. + return nil + } + return g.forwardErrIfFailOnIOErrors(err) + } + + for _, info := range dirs { + name := info.Name() + isDir, err := g.isDir(fsys, dir, name, info) + if err != nil { + return err + } + + if isDir { + p := path.Join(dir, name) + if !canMatchFiles || !g.filesOnly { + // `**` can match *this* dir, so add it + if e = fn(p, info); e != nil { + if e == SkipDir { + e = nil + continue + } + return + } + } + if e = g.globDoubleStarWalk(fsys, p, canMatchFiles, fn); e != nil { + return + } + } else if canMatchFiles { + if e = fn(path.Join(dir, name), info); e != nil { + if e == SkipDir { + e = nil + } + return + } + } + } + + return +} + +type DirEntryFromFileInfo struct { + fi fs.FileInfo +} + +func (d *DirEntryFromFileInfo) Name() string { + return d.fi.Name() +} + +func (d *DirEntryFromFileInfo) IsDir() bool { + return d.fi.IsDir() +} + +func (d *DirEntryFromFileInfo) Type() fs.FileMode { + return d.fi.Mode().Type() +} + +func (d *DirEntryFromFileInfo) Info() (fs.FileInfo, error) { + return d.fi, nil +} + +func dirEntryFromFileInfo(fi fs.FileInfo) fs.DirEntry { + return &DirEntryFromFileInfo{fi} +} + +type DirEntryWithFullPath struct { + Entry fs.DirEntry + Path string +} + +func hasMetaExceptAlts(s string) bool { + var c byte + l := len(s) + for i := 0; i < l; i++ { + c = s[i] + if c == '*' || c == '?' || c == '[' { + return true + } else if c == '\\' { + // skip next byte + i++ + } + } + return false +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/match.go b/vendor/github.com/bmatcuk/doublestar/v4/match.go new file mode 100644 index 0000000000..6581d99862 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/match.go @@ -0,0 +1,376 @@ +package doublestar + +import ( + "path/filepath" + "unicode/utf8" +) + +// Match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-path-separators +// '/**/' matches zero or more directories +// '?' matches any single non-path-separator character +// '[' [ '^' '!' ] { character-range } ']' +// character class (must be non-empty) +// starting with `^` or `!` negates the class +// '{' { term } [ ',' { term } ... ] '}' +// alternatives +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match returns true if `name` matches the file name `pattern`. `name` and +// `pattern` are split on forward slash (`/`) characters and may be relative or +// absolute. +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// A doublestar (`**`) should appear surrounded by path separators such as +// `/**/`. A mid-pattern doublestar (`**`) behaves like bash's globstar +// option: a pattern such as `path/to/**.txt` would return the same results as +// `path/to/*.txt`. The pattern you're looking for is `path/to/**/*.txt`. 
+// +// Note: this is meant as a drop-in replacement for path.Match() which +// always uses '/' as the path separator. If you want to support systems +// which use a different path separator (such as Windows), what you want +// is PathMatch(). Alternatively, you can run filepath.ToSlash() on both +// pattern and name and then use this function. +// +// Note: users should _not_ count on the returned error, +// doublestar.ErrBadPattern, being equal to path.ErrBadPattern. +// +func Match(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, '/', true) +} + +// PathMatch returns true if `name` matches the file name `pattern`. The +// difference between Match and PathMatch is that PathMatch will automatically +// use your system's path separator to split `name` and `pattern`. On systems +// where the path separator is `'\'`, escaping will be disabled. +// +// Note: this is meant as a drop-in replacement for filepath.Match(). It +// assumes that both `pattern` and `name` are using the system's path +// separator. If you can't be sure of that, use filepath.ToSlash() on both +// `pattern` and `name`, and then use the Match() function instead. +// +func PathMatch(pattern, name string) (bool, error) { + return matchWithSeparator(pattern, name, filepath.Separator, true) +} + +func matchWithSeparator(pattern, name string, separator rune, validate bool) (matched bool, err error) { + return doMatchWithSeparator(pattern, name, separator, validate, -1, -1, -1, -1, 0, 0) +} + +func doMatchWithSeparator(pattern, name string, separator rune, validate bool, doublestarPatternBacktrack, doublestarNameBacktrack, starPatternBacktrack, starNameBacktrack, patIdx, nameIdx int) (matched bool, err error) { + patLen := len(pattern) + nameLen := len(name) + startOfSegment := true +MATCH: + for nameIdx < nameLen { + if patIdx < patLen { + switch pattern[patIdx] { + case '*': + if patIdx++; patIdx < patLen && pattern[patIdx] == '*' { + // doublestar - must begin with a path separator, otherwise we'll + // treat it like a single star like bash + patIdx++ + if startOfSegment { + if patIdx >= patLen { + // pattern ends in `/**`: return true + return true, nil + } + + // doublestar must also end with a path separator, otherwise we're + // just going to treat the doublestar as a single star like bash + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + if patRune == separator { + patIdx += patRuneLen + + doublestarPatternBacktrack = patIdx + doublestarNameBacktrack = nameIdx + starPatternBacktrack = -1 + starNameBacktrack = -1 + continue + } + } + } + startOfSegment = false + + starPatternBacktrack = patIdx + starNameBacktrack = nameIdx + continue + + case '?': + startOfSegment = false + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + if nameRune == separator { + // `?` cannot match the separator + break + } + + patIdx++ + nameIdx += nameRuneLen + continue + + case '[': + startOfSegment = false + if patIdx++; patIdx >= patLen { + // class didn't end + return false, ErrBadPattern + } + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + + matched := false + negate := pattern[patIdx] == '!' 
|| pattern[patIdx] == '^' + if negate { + patIdx++ + } + + if patIdx >= patLen || pattern[patIdx] == ']' { + // class didn't end or empty character class + return false, ErrBadPattern + } + + last := utf8.MaxRune + for patIdx < patLen && pattern[patIdx] != ']' { + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + + // match a range + if last < utf8.MaxRune && patRune == '-' && patIdx < patLen && pattern[patIdx] != ']' { + if pattern[patIdx] == '\\' { + // next character is escaped + patIdx++ + } + patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + + if last <= nameRune && nameRune <= patRune { + matched = true + break + } + + // didn't match range - reset `last` + last = utf8.MaxRune + continue + } + + // not a range - check if the next rune is escaped + if patRune == '\\' { + patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:]) + patIdx += patRuneLen + } + + // check if the rune matches + if patRune == nameRune { + matched = true + break + } + + // no matches yet + last = patRune + } + + if matched == negate { + // failed to match - if we reached the end of the pattern, that means + // we never found a closing `]` + if patIdx >= patLen { + return false, ErrBadPattern + } + break + } + + closingIdx := indexUnescapedByte(pattern[patIdx:], ']', true) + if closingIdx == -1 { + // no closing `]` + return false, ErrBadPattern + } + + patIdx += closingIdx + 1 + nameIdx += nameRuneLen + continue + + case '{': + startOfSegment = false + beforeIdx := patIdx + patIdx++ + closingIdx := indexMatchedClosingAlt(pattern[patIdx:], separator != '\\') + if closingIdx == -1 { + // no closing `}` + return false, ErrBadPattern + } + closingIdx += patIdx + + for { + commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\') + if commaIdx == -1 { + break + } + commaIdx += patIdx + + result, err := doMatchWithSeparator(pattern[:beforeIdx]+pattern[patIdx:commaIdx]+pattern[closingIdx+1:], name, separator, validate, doublestarPatternBacktrack, doublestarNameBacktrack, starPatternBacktrack, starNameBacktrack, beforeIdx, nameIdx) + if result || err != nil { + return result, err + } + + patIdx = commaIdx + 1 + } + return doMatchWithSeparator(pattern[:beforeIdx]+pattern[patIdx:closingIdx]+pattern[closingIdx+1:], name, separator, validate, doublestarPatternBacktrack, doublestarNameBacktrack, starPatternBacktrack, starNameBacktrack, beforeIdx, nameIdx) + + case '\\': + if separator != '\\' { + // next rune is "escaped" in the pattern - literal match + if patIdx++; patIdx >= patLen { + // pattern ended + return false, ErrBadPattern + } + } + fallthrough + + default: + patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:]) + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + if patRune != nameRune { + if separator != '\\' && patIdx > 0 && pattern[patIdx-1] == '\\' { + // if this rune was meant to be escaped, we need to move patIdx + // back to the backslash before backtracking or validating below + patIdx-- + } + break + } + + patIdx += patRuneLen + nameIdx += nameRuneLen + startOfSegment = patRune == separator + continue + } + } + + if starPatternBacktrack >= 0 { + // `*` backtrack, but only if the `name` rune isn't the separator + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[starNameBacktrack:]) + if nameRune != separator { + starNameBacktrack += nameRuneLen + patIdx = starPatternBacktrack + nameIdx = starNameBacktrack + startOfSegment = false + continue + } + } + + if 
doublestarPatternBacktrack >= 0 { + // `**` backtrack, advance `name` past next separator + nameIdx = doublestarNameBacktrack + for nameIdx < nameLen { + nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:]) + nameIdx += nameRuneLen + if nameRune == separator { + doublestarNameBacktrack = nameIdx + patIdx = doublestarPatternBacktrack + startOfSegment = true + continue MATCH + } + } + } + + if validate && patIdx < patLen && !doValidatePattern(pattern[patIdx:], separator) { + return false, ErrBadPattern + } + return false, nil + } + + if nameIdx < nameLen { + // we reached the end of `pattern` before the end of `name` + return false, nil + } + + // we've reached the end of `name`; we've successfully matched if we've also + // reached the end of `pattern`, or if the rest of `pattern` can match a + // zero-length string + return isZeroLengthPattern(pattern[patIdx:], separator) +} + +func isZeroLengthPattern(pattern string, separator rune) (ret bool, err error) { + // `/**` is a special case - a pattern such as `path/to/a/**` *should* match + // `path/to/a` because `a` might be a directory + if pattern == "" || pattern == "*" || pattern == "**" || pattern == string(separator)+"**" { + return true, nil + } + + if pattern[0] == '{' { + closingIdx := indexMatchedClosingAlt(pattern[1:], separator != '\\') + if closingIdx == -1 { + // no closing '}' + return false, ErrBadPattern + } + closingIdx += 1 + + patIdx := 1 + for { + commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\') + if commaIdx == -1 { + break + } + commaIdx += patIdx + + ret, err = isZeroLengthPattern(pattern[patIdx:commaIdx]+pattern[closingIdx+1:], separator) + if ret || err != nil { + return + } + + patIdx = commaIdx + 1 + } + return isZeroLengthPattern(pattern[patIdx:closingIdx]+pattern[closingIdx+1:], separator) + } + + // no luck - validate the rest of the pattern + if !doValidatePattern(pattern, separator) { + return false, ErrBadPattern + } + return false, nil +} + +// Finds the index of the first unescaped byte `c`, or negative 1. +func indexUnescapedByte(s string, c byte, allowEscaping bool) int { + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == c { + return i + } + } + return -1 +} + +// Assuming the byte before the beginning of `s` is an opening `{`, this +// function will find the index of the matching `}`. That is, it'll skip over +// any nested `{}` and account for escaping +func indexMatchedClosingAlt(s string, allowEscaping bool) int { + alts := 1 + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == '{' { + alts++ + } else if s[i] == '}' { + if alts--; alts == 0 { + return i + } + } + } + return -1 +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/utils.go b/vendor/github.com/bmatcuk/doublestar/v4/utils.go new file mode 100644 index 0000000000..0ab1dc98f7 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/utils.go @@ -0,0 +1,147 @@ +package doublestar + +import ( + "errors" + "os" + "path" + "path/filepath" + "strings" +) + +// SplitPattern is a utility function. Given a pattern, SplitPattern will +// return two strings: the first string is everything up to the last slash +// (`/`) that appears _before_ any unescaped "meta" characters (ie, `*?[{`). +// The second string is everything after that slash. 
For example, given the +// pattern: +// +// ../../path/to/meta*/** +// ^----------- split here +// +// SplitPattern returns "../../path/to" and "meta*/**". This is useful for +// initializing os.DirFS() to call Glob() because Glob() will silently fail if +// your pattern includes `/./` or `/../`. For example: +// +// base, pattern := SplitPattern("../../path/to/meta*/**") +// fsys := os.DirFS(base) +// matches, err := Glob(fsys, pattern) +// +// If SplitPattern cannot find somewhere to split the pattern (for example, +// `meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in +// this example). +// +// Of course, it is your responsibility to decide if the returned base path is +// "safe" in the context of your application. Perhaps you could use Match() to +// validate against a list of approved base directories? +// +func SplitPattern(p string) (base, pattern string) { + base = "." + pattern = p + + splitIdx := -1 + for i := 0; i < len(p); i++ { + c := p[i] + if c == '\\' { + i++ + } else if c == '/' { + splitIdx = i + } else if c == '*' || c == '?' || c == '[' || c == '{' { + break + } + } + + if splitIdx == 0 { + return "/", p[1:] + } else if splitIdx > 0 { + return p[:splitIdx], p[splitIdx+1:] + } + + return +} + +// FilepathGlob returns the names of all files matching pattern or nil if there +// is no matching file. The syntax of pattern is the same as in Match(). The +// pattern may describe hierarchical names such as usr/*/bin/ed. +// +// FilepathGlob ignores file system errors such as I/O errors reading +// directories by default. The only possible returned error is ErrBadPattern, +// reporting that the pattern is malformed. +// +// To enable aborting on I/O errors, the WithFailOnIOErrors option can be +// passed. +// +// Note: FilepathGlob is a convenience function that is meant as a drop-in +// replacement for `path/filepath.Glob()` for users who don't need the +// complication of io/fs. Basically, it: +// - Runs `filepath.Clean()` and `ToSlash()` on the pattern +// - Runs `SplitPattern()` to get a base path and a pattern to Glob +// - Creates an FS object from the base path and `Glob()s` on the pattern +// - Joins the base path with all of the matches from `Glob()` +// +// Returned paths will use the system's path separator, just like +// `filepath.Glob()`. +// +// Note: the returned error doublestar.ErrBadPattern is not equal to +// filepath.ErrBadPattern. +// +func FilepathGlob(pattern string, opts ...GlobOption) (matches []string, err error) { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + base, f := SplitPattern(pattern) + if f == "" || f == "." || f == ".." { + // some special cases to match filepath.Glob behavior + if !ValidatePathPattern(pattern) { + return nil, ErrBadPattern + } + + if filepath.Separator != '\\' { + pattern = unescapeMeta(pattern) + } + + if _, err = os.Lstat(pattern); err != nil { + g := newGlob(opts...) 
+ if errors.Is(err, os.ErrNotExist) { + return nil, g.handlePatternNotExist(true) + } + return nil, g.forwardErrIfFailOnIOErrors(err) + } + return []string{filepath.FromSlash(pattern)}, nil + } + + fs := os.DirFS(base) + if matches, err = Glob(fs, f, opts...); err != nil { + return nil, err + } + for i := range matches { + // use path.Join because we used ToSlash above to ensure our paths are made + // of forward slashes, no matter what the system uses + matches[i] = filepath.FromSlash(path.Join(base, matches[i])) + } + return +} + +// Finds the next comma, but ignores any commas that appear inside nested `{}`. +// Assumes that each opening bracket has a corresponding closing bracket. +func indexNextAlt(s string, allowEscaping bool) int { + alts := 1 + l := len(s) + for i := 0; i < l; i++ { + if allowEscaping && s[i] == '\\' { + // skip next byte + i++ + } else if s[i] == '{' { + alts++ + } else if s[i] == '}' { + alts-- + } else if s[i] == ',' && alts == 1 { + return i + } + } + return -1 +} + +var metaReplacer = strings.NewReplacer("\\*", "*", "\\?", "?", "\\[", "[", "\\]", "]", "\\{", "{", "\\}", "}") + +// Unescapes meta characters (*?[]{}) +func unescapeMeta(pattern string) string { + return metaReplacer.Replace(pattern) +} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/validate.go b/vendor/github.com/bmatcuk/doublestar/v4/validate.go new file mode 100644 index 0000000000..c689b9ebab --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/v4/validate.go @@ -0,0 +1,82 @@ +package doublestar + +import "path/filepath" + +// Validate a pattern. Patterns are validated while they run in Match(), +// PathMatch(), and Glob(), so, you normally wouldn't need to call this. +// However, there are cases where this might be useful: for example, if your +// program allows a user to enter a pattern that you'll run at a later time, +// you might want to validate it. +// +// ValidatePattern assumes your pattern uses '/' as the path separator. +// +func ValidatePattern(s string) bool { + return doValidatePattern(s, '/') +} + +// Like ValidatePattern, only uses your OS path separator. In other words, use +// ValidatePattern if you would normally use Match() or Glob(). Use +// ValidatePathPattern if you would normally use PathMatch(). Keep in mind, +// Glob() requires '/' separators, even if your OS uses something else. +// +func ValidatePathPattern(s string) bool { + return doValidatePattern(s, filepath.Separator) +} + +func doValidatePattern(s string, separator rune) bool { + altDepth := 0 + l := len(s) +VALIDATE: + for i := 0; i < l; i++ { + switch s[i] { + case '\\': + if separator != '\\' { + // skip the next byte - return false if there is no next byte + if i++; i >= l { + return false + } + } + continue + + case '[': + if i++; i >= l { + // class didn't end + return false + } + if s[i] == '^' || s[i] == '!' 
{ + i++ + } + if i >= l || s[i] == ']' { + // class didn't end or empty character class + return false + } + + for ; i < l; i++ { + if separator != '\\' && s[i] == '\\' { + i++ + } else if s[i] == ']' { + // looks good + continue VALIDATE + } + } + + // class didn't end + return false + + case '{': + altDepth++ + continue + + case '}': + if altDepth == 0 { + // alt end without a corresponding start + return false + } + altDepth-- + continue + } + } + + // valid as long as all alts are closed + return altDepth == 0 +} diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md b/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md index 99dd0151fd..9c3071ec62 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/README.md @@ -83,10 +83,26 @@ WebHook request } ``` +# Customizing signing behavior + +Users can customize signing behavior by passing in a +[Signer](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#Signer) +implementation when creating an +[AppsTransport](https://pkg.go.dev/github.com/bradleyfalzon/ghinstallation/v2#AppsTransport). +For example, this can be used to create tokens backed by keys in a KMS system. + +```go +signer := &myCustomSigner{ + key: "https://url/to/key/vault", +} +atr := NewAppsTransportWithOptions(http.DefaultTransport, 1, WithSigner(signer)) +tr := NewFromAppsTransport(atr, 99) +``` + # License [Apache 2.0](LICENSE) # Dependencies -- [github.com/golang-jwt/jwt-go](https://github.com/golang-jwt/jwt-go) +- [github.com/golang-jwt/jwt-go](https://github.com/golang-jwt/jwt-go) diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go index e1424d6294..317de76fc1 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go @@ -2,6 +2,7 @@ package ghinstallation import ( "crypto/rsa" + "errors" "fmt" "io/ioutil" "net/http" @@ -23,7 +24,7 @@ type AppsTransport struct { BaseURL string // BaseURL is the scheme and host for GitHub API, defaults to https://api.github.com Client Client // Client to use to refresh tokens, defaults to http.Client with provided transport tr http.RoundTripper // tr is the underlying roundtripper being wrapped - key *rsa.PrivateKey // key is the GitHub App's private key + signer Signer // signer signs JWT tokens. appID int64 // appID is the GitHub App's ID } @@ -57,11 +58,29 @@ func NewAppsTransportFromPrivateKey(tr http.RoundTripper, appID int64, key *rsa. BaseURL: apiBaseURL, Client: &http.Client{Transport: tr}, tr: tr, - key: key, + signer: NewRSASigner(jwt.SigningMethodRS256, key), appID: appID, } } +func NewAppsTransportWithOptions(tr http.RoundTripper, appID int64, opts ...AppsTransportOption) (*AppsTransport, error) { + t := &AppsTransport{ + BaseURL: apiBaseURL, + Client: &http.Client{Transport: tr}, + tr: tr, + appID: appID, + } + for _, fn := range opts { + fn(t) + } + + if t.signer == nil { + return nil, errors.New("no signer provided") + } + + return t, nil +} + // RoundTrip implements http.RoundTripper interface. func (t *AppsTransport) RoundTrip(req *http.Request) (*http.Response, error) { // GitHub rejects expiry and issue timestamps that are not an integer, @@ -69,14 +88,13 @@ func (t *AppsTransport) RoundTrip(req *http.Request) (*http.Response, error) { // Truncate them before passing to jwt-go. 
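A minimal sketch of a custom Signer plugged into the options-based constructor added in this file (the `kmsSigner` type and its key URL are hypothetical; only the `Signer` interface, `NewAppsTransportWithOptions`, and `WithSigner` come from this patch):

```go
package ghexample

import (
	"net/http"

	"github.com/bradleyfalzon/ghinstallation/v2"
	jwt "github.com/golang-jwt/jwt/v4"
)

// kmsSigner is a hypothetical Signer that would delegate signing to an
// external key service instead of holding an *rsa.PrivateKey in memory.
type kmsSigner struct {
	keyURL string
}

// Sign satisfies the Signer interface introduced by this patch; a real
// implementation would send the claims to the KMS and return the signed JWT.
func (s *kmsSigner) Sign(claims jwt.Claims) (string, error) {
	return "", nil // placeholder only
}

// newKMSTransport wires the custom signer in via the new functional options.
func newKMSTransport(appID int64) (*ghinstallation.AppsTransport, error) {
	signer := &kmsSigner{keyURL: "https://url/to/key/vault"}
	return ghinstallation.NewAppsTransportWithOptions(http.DefaultTransport, appID, ghinstallation.WithSigner(signer))
}
```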
iss := time.Now().Add(-30 * time.Second).Truncate(time.Second) exp := iss.Add(2 * time.Minute) - claims := &jwt.StandardClaims{ - IssuedAt: iss.Unix(), - ExpiresAt: exp.Unix(), + claims := &jwt.RegisteredClaims{ + IssuedAt: jwt.NewNumericDate(iss), + ExpiresAt: jwt.NewNumericDate(exp), Issuer: strconv.FormatInt(t.appID, 10), } - bearer := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) - ss, err := bearer.SignedString(t.key) + ss, err := t.signer.Sign(claims) if err != nil { return nil, fmt.Errorf("could not sign jwt: %s", err) } @@ -87,3 +105,12 @@ func (t *AppsTransport) RoundTrip(req *http.Request) (*http.Response, error) { resp, err := t.tr.RoundTrip(req) return resp, err } + +type AppsTransportOption func(*AppsTransport) + +// WithSigner configures the AppsTransport to use the given Signer for generating JWT tokens. +func WithSigner(signer Signer) AppsTransportOption { + return func(at *AppsTransport) { + at.signer = signer + } +} diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go new file mode 100644 index 0000000000..928e10efc9 --- /dev/null +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/sign.go @@ -0,0 +1,33 @@ +package ghinstallation + +import ( + "crypto/rsa" + + jwt "github.com/golang-jwt/jwt/v4" +) + +// Signer is a JWT token signer. This is a wrapper around [jwt.SigningMethod] with predetermined +// key material. +type Signer interface { + // Sign signs the given claims and returns a JWT token string, as specified + // by [jwt.Token.SignedString] + Sign(claims jwt.Claims) (string, error) +} + +// RSASigner signs JWT tokens using RSA keys. +type RSASigner struct { + method *jwt.SigningMethodRSA + key *rsa.PrivateKey +} + +func NewRSASigner(method *jwt.SigningMethodRSA, key *rsa.PrivateKey) *RSASigner { + return &RSASigner{ + method: method, + key: key, + } +} + +// Sign signs the JWT claims with the RSA key. +func (s *RSASigner) Sign(claims jwt.Claims) (string, error) { + return jwt.NewWithClaims(s.method, claims).SignedString(s.key) +} diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go index ecebffad1b..015ebe4b65 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -12,7 +13,7 @@ import ( "sync" "time" - "github.com/google/go-github/v45/github" + "github.com/google/go-github/v53/github" ) const ( @@ -63,6 +64,11 @@ func (e *HTTPError) Error() string { return e.Message } +// Unwrap implements the standard library's error wrapping. It unwraps to the root cause. +func (e *HTTPError) Unwrap() error { + return e.RootCause +} + var _ http.RoundTripper = &Transport{} // NewKeyFromFile returns a Transport using a private key from file. @@ -119,7 +125,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { } }() } - + token, err := t.Token(req.Context()) if err != nil { return nil, err @@ -127,18 +133,29 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { creq := cloneRequest(req) // per RoundTripper contract creq.Header.Set("Authorization", "token "+token) - creq.Header.Add("Accept", acceptHeader) // We add to "Accept" header to avoid overwriting existing req headers. 
+ + if creq.Header.Get("Accept") == "" { // We only add an "Accept" header to avoid overwriting the expected behavior. + creq.Header.Add("Accept", acceptHeader) + } reqBodyClosed = true // req.Body is assumed to be closed by the tr RoundTripper. resp, err := t.tr.RoundTrip(creq) return resp, err } +func (at *accessToken) getRefreshTime() time.Time { + return at.ExpiresAt.Add(-time.Minute) +} + +func (at *accessToken) isExpired() bool { + return at == nil || at.getRefreshTime().Before(time.Now()) +} + // Token checks the active token expiration and renews if necessary. Token returns // a valid access token. If renewal fails an error is returned. func (t *Transport) Token(ctx context.Context) (string, error) { t.mu.Lock() defer t.mu.Unlock() - if t.token == nil || t.token.ExpiresAt.Add(-time.Minute).Before(time.Now()) { + if t.token.isExpired() { // Token is not set or expired/nearly expired, so refresh if err := t.refreshToken(ctx); err != nil { return "", fmt.Errorf("could not refresh installation id %v's token: %w", t.installationID, err) @@ -164,6 +181,16 @@ func (t *Transport) Repositories() ([]github.Repository, error) { return t.token.Repositories, nil } +// Expiry returns a transport token's expiration time and refresh time. There is a small grace period +// built in where a token will be refreshed before it expires. expiresAt is the actual token expiry, +// and refreshAt is when a call to Token() will cause it to be refreshed. +func (t *Transport) Expiry() (expiresAt time.Time, refreshAt time.Time, err error) { + if t.token == nil { + return time.Time{}, time.Time{}, errors.New("Expiry() = unknown, err: nil token") + } + return t.token.ExpiresAt, t.token.getRefreshTime(), nil +} + func (t *Transport) refreshToken(ctx context.Context) error { // Convert InstallationTokenOptions into a ReadWriter to pass as an argument to http.NewRequest. body, err := GetReadWriter(t.InstallationTokenOptions) diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go index 8cd4e333b9..83d7cdadd3 100644 --- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go +++ b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go @@ -9,7 +9,7 @@ import ( fp "github.com/cloudflare/circl/math/fp448" ) -// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogeneous to Goldilocks. +// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks. type twistCurve struct{} // Identity returns the identity point. diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go index ab19d0ad12..1755fd1e6d 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go @@ -6,13 +6,21 @@ package sha3 // KeccakF1600 applies the Keccak permutation to a 1600b-wide // state represented as a slice of 25 uint64s. +// If turbo is true, applies the 12-round variant instead of the +// regular 24-round variant. // nolint:funlen -func KeccakF1600(a *[25]uint64) { +func KeccakF1600(a *[25]uint64, turbo bool) { // Implementation translated from Keccak-inplace.c // in the keccak reference code. var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 - for i := 0; i < 24; i += 4 { + i := 0 + + if turbo { + i = 12 + } + + for ; i < 24; i += 4 { // Combines the 5 steps in each round into 2 steps. // Unrolls 4 rounds per loop and spreads some steps across rounds. 
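The change above threads a `turbo` flag through the permutation; a minimal sketch of what the flag selects (it would have to live inside this internal `sha3` package, since `KeccakF1600` is not exported outside circl):

```go
package sha3

// keccakTurboSketch illustrates the new turbo parameter: false runs the full
// 24-round Keccak-f[1600], while true starts the round loop at 12 and so
// performs only the last 12 rounds, which is what TurboSHAKE uses.
func keccakTurboSketch() {
	var state [25]uint64
	KeccakF1600(&state, false) // regular 24-round permutation
	KeccakF1600(&state, true)  // reduced 12-round TurboSHAKE variant
}
```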
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go index b35cd006b0..a0df5aa6c5 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go @@ -51,6 +51,7 @@ type State struct { // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing + turbo bool // Whether we're using 12 rounds instead of 24 } // BlockSize returns the rate of sponge underlying this hash function. @@ -86,11 +87,11 @@ func (d *State) permute() { xorIn(d, d.buf()) d.bufe = 0 d.bufo = 0 - KeccakF1600(&d.a) + KeccakF1600(&d.a, d.turbo) case spongeSqueezing: // If we're squeezing, we need to apply the permutation before // copying more output. - KeccakF1600(&d.a) + KeccakF1600(&d.a, d.turbo) d.bufe = d.rate d.bufo = 0 copyOut(d, d.buf()) @@ -136,7 +137,7 @@ func (d *State) Write(p []byte) (written int, err error) { // The fast path; absorb a full "rate" bytes of input and apply the permutation. xorIn(d, p[:d.rate]) p = p[d.rate:] - KeccakF1600(&d.a) + KeccakF1600(&d.a, d.turbo) } else { // The slow path; buffer the input until we can fill the sponge, and then xor it in. todo := d.rate - bufl @@ -193,3 +194,7 @@ func (d *State) Sum(in []byte) []byte { _, _ = dup.Read(hash) return append(in, hash...) } + +func (d *State) IsAbsorbing() bool { + return d.state == spongeAbsorbing +} diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go index b92c5b7d78..77817f758c 100644 --- a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go +++ b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go @@ -57,6 +57,17 @@ func NewShake128() State { return State{rate: rate128, dsbyte: dsbyteShake} } +// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash. +// Its generic security strength is 128 bits against all attacks if at +// least 32 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake128(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate128, dsbyte: D, turbo: true} +} + // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. @@ -64,6 +75,17 @@ func NewShake256() State { return State{rate: rate256, dsbyte: dsbyteShake} } +// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash. +// Its generic security strength is 256 bits against all attacks if +// at least 64 bytes of its output are used. +// D is the domain separation byte and must be between 0x01 and 0x7f inclusive. +func NewTurboShake256(D byte) State { + if D == 0 || D > 0x7f { + panic("turboshake: D out of range") + } + return State{rate: rate256, dsbyte: D, turbo: true} +} + // ShakeSum128 writes an arbitrary-length digest of data into hash. func ShakeSum128(hash, data []byte) { h := NewShake128() @@ -77,3 +99,21 @@ func ShakeSum256(hash, data []byte) { _, _ = h.Write(data) _, _ = h.Read(hash) } + +// TurboShakeSum128 writes an arbitrary-length digest of data into hash. 
+func TurboShakeSum128(hash, data []byte, D byte) { + h := NewTurboShake128(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +// TurboShakeSum256 writes an arbitrary-length digest of data into hash. +func TurboShakeSum256(hash, data []byte, D byte) { + h := NewTurboShake256(D) + _, _ = h.Write(data) + _, _ = h.Read(hash) +} + +func (d *State) SwitchDS(D byte) { + d.dsbyte = D +} diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go new file mode 100644 index 0000000000..158fd83a7a --- /dev/null +++ b/vendor/github.com/cloudflare/circl/math/primes.go @@ -0,0 +1,34 @@ +package math + +import ( + "crypto/rand" + "io" + "math/big" +) + +// IsSafePrime reports whether p is (probably) a safe prime. +// The prime p=2*q+1 is safe prime if both p and q are primes. +// Note that ProbablyPrime is not suitable for judging primes +// that an adversary may have crafted to fool the test. +func IsSafePrime(p *big.Int) bool { + pdiv2 := new(big.Int).Rsh(p, 1) + return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20) +} + +// SafePrime returns a number of the given bit length that is a safe prime with high probability. +// The number returned p=2*q+1 is a safe prime if both p and q are primes. +// SafePrime will return error for any error returned by rand.Read or if bits < 2. +func SafePrime(random io.Reader, bits int) (*big.Int, error) { + one := big.NewInt(1) + p := new(big.Int) + for { + q, err := rand.Prime(random, bits-1) + if err != nil { + return nil, err + } + p.Lsh(q, 1).Add(p, one) + if p.ProbablyPrime(20) { + return p, nil + } + } +} diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go index 08ca65d799..2c73c26fb1 100644 --- a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go +++ b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go @@ -1,7 +1,7 @@ // Package ed25519 implements Ed25519 signature scheme as described in RFC-8032. // // This package provides optimized implementations of the three signature -// variants and maintaining closer compatiblilty with crypto/ed25519. +// variants and maintaining closer compatibility with crypto/ed25519. 
// // | Scheme Name | Sign Function | Verification | Context | // |-------------|-------------------|---------------|-------------------| diff --git a/vendor/github.com/devtron-labs/authenticator/client/k8sClient.go b/vendor/github.com/devtron-labs/authenticator/client/k8sClient.go index 11c95d8d31..93d73251e3 100644 --- a/vendor/github.com/devtron-labs/authenticator/client/k8sClient.go +++ b/vendor/github.com/devtron-labs/authenticator/client/k8sClient.go @@ -126,6 +126,10 @@ func (impl *K8sClient) GetDevtronConfig() (secret *v1.Secret, err error) { return secret, nil } +func (impl *K8sClient) GetDevtronNamespace() string { + return impl.runtimeConfig.DevtronDefaultNamespaceName +} + // argocd specific conf const ( SettingAdminPasswordHashKey = "admin.password" diff --git a/vendor/github.com/go-errors/errors/.travis.yml b/vendor/github.com/go-errors/errors/.travis.yml index 9d00fdd5d6..77a6bccf77 100644 --- a/vendor/github.com/go-errors/errors/.travis.yml +++ b/vendor/github.com/go-errors/errors/.travis.yml @@ -3,3 +3,6 @@ language: go go: - "1.8.x" - "1.10.x" + - "1.13.x" + - "1.14.x" + - "1.16.x" diff --git a/vendor/github.com/go-errors/errors/README.md b/vendor/github.com/go-errors/errors/README.md index 5d4f1873dd..3d78525940 100644 --- a/vendor/github.com/go-errors/errors/README.md +++ b/vendor/github.com/go-errors/errors/README.md @@ -64,3 +64,19 @@ packages by Facebook and Dropbox, it was moved to one canonical location so everyone can benefit. This package is licensed under the MIT license, see LICENSE.MIT for details. + + +## Changelog +* v1.1.0 updated to use go1.13's standard-library errors.Is method instead of == in errors.Is +* v1.2.0 added `errors.As` from the standard library. +* v1.3.0 *BREAKING* updated error methods to return `error` instead of `*Error`. +> Code that needs access to the underlying `*Error` can use the new errors.AsError(e) +> ``` +> // before +> errors.New(err).ErrorStack() +> // after +>. errors.AsError(errors.Wrap(err)).ErrorStack() +> ``` +* v1.4.0 *BREAKING* v1.4.0 reverted all changes from v1.3.0 and is identical to v1.2.0 +* v1.4.1 no code change, but now without an unnecessary cover.out file. 
+* v1.4.2 performance improvement to ErrorStack() to avoid unnecessary work https://github.com/go-errors/errors/pull/40 diff --git a/vendor/github.com/go-errors/errors/cover.out b/vendor/github.com/go-errors/errors/cover.out deleted file mode 100644 index ab18b0519f..0000000000 --- a/vendor/github.com/go-errors/errors/cover.out +++ /dev/null @@ -1,89 +0,0 @@ -mode: set -github.com/go-errors/errors/stackframe.go:27.51,30.25 2 1 -github.com/go-errors/errors/stackframe.go:33.2,38.8 3 1 -github.com/go-errors/errors/stackframe.go:30.25,32.3 1 0 -github.com/go-errors/errors/stackframe.go:43.47,44.31 1 1 -github.com/go-errors/errors/stackframe.go:47.2,47.48 1 1 -github.com/go-errors/errors/stackframe.go:44.31,46.3 1 1 -github.com/go-errors/errors/stackframe.go:52.42,56.16 3 1 -github.com/go-errors/errors/stackframe.go:60.2,60.60 1 1 -github.com/go-errors/errors/stackframe.go:56.16,58.3 1 0 -github.com/go-errors/errors/stackframe.go:64.55,67.16 2 1 -github.com/go-errors/errors/stackframe.go:71.2,72.61 2 1 -github.com/go-errors/errors/stackframe.go:76.2,76.66 1 1 -github.com/go-errors/errors/stackframe.go:67.16,69.3 1 0 -github.com/go-errors/errors/stackframe.go:72.61,74.3 1 0 -github.com/go-errors/errors/stackframe.go:79.56,91.63 3 1 -github.com/go-errors/errors/stackframe.go:95.2,95.53 1 1 -github.com/go-errors/errors/stackframe.go:100.2,101.18 2 1 -github.com/go-errors/errors/stackframe.go:91.63,94.3 2 1 -github.com/go-errors/errors/stackframe.go:95.53,98.3 2 1 -github.com/go-errors/errors/error.go:70.32,73.23 2 1 -github.com/go-errors/errors/error.go:80.2,85.3 3 1 -github.com/go-errors/errors/error.go:74.2,75.10 1 1 -github.com/go-errors/errors/error.go:76.2,77.28 1 1 -github.com/go-errors/errors/error.go:92.43,95.23 2 1 -github.com/go-errors/errors/error.go:104.2,109.3 3 1 -github.com/go-errors/errors/error.go:96.2,97.11 1 1 -github.com/go-errors/errors/error.go:98.2,99.10 1 1 -github.com/go-errors/errors/error.go:100.2,101.28 1 1 -github.com/go-errors/errors/error.go:115.39,117.19 1 1 -github.com/go-errors/errors/error.go:121.2,121.29 1 1 -github.com/go-errors/errors/error.go:125.2,125.43 1 1 -github.com/go-errors/errors/error.go:129.2,129.14 1 1 -github.com/go-errors/errors/error.go:117.19,119.3 1 1 -github.com/go-errors/errors/error.go:121.29,123.3 1 1 -github.com/go-errors/errors/error.go:125.43,127.3 1 1 -github.com/go-errors/errors/error.go:135.53,137.2 1 1 -github.com/go-errors/errors/error.go:140.34,142.2 1 1 -github.com/go-errors/errors/error.go:146.34,149.42 2 1 -github.com/go-errors/errors/error.go:153.2,153.20 1 1 -github.com/go-errors/errors/error.go:149.42,151.3 1 1 -github.com/go-errors/errors/error.go:158.39,160.2 1 1 -github.com/go-errors/errors/error.go:164.46,165.23 1 1 -github.com/go-errors/errors/error.go:173.2,173.19 1 1 -github.com/go-errors/errors/error.go:165.23,168.32 2 1 -github.com/go-errors/errors/error.go:168.32,170.4 1 1 -github.com/go-errors/errors/error.go:177.37,178.42 1 1 -github.com/go-errors/errors/error.go:181.2,181.41 1 1 -github.com/go-errors/errors/error.go:178.42,180.3 1 1 -github.com/go-errors/errors/parse_panic.go:10.39,12.2 1 1 -github.com/go-errors/errors/parse_panic.go:16.46,24.34 5 1 -github.com/go-errors/errors/parse_panic.go:70.2,70.43 1 1 -github.com/go-errors/errors/parse_panic.go:73.2,73.55 1 0 -github.com/go-errors/errors/parse_panic.go:24.34,27.23 2 1 -github.com/go-errors/errors/parse_panic.go:27.23,28.42 1 1 -github.com/go-errors/errors/parse_panic.go:28.42,31.5 2 1 -github.com/go-errors/errors/parse_panic.go:31.6,33.5 1 0 
-github.com/go-errors/errors/parse_panic.go:35.5,35.29 1 1 -github.com/go-errors/errors/parse_panic.go:35.29,36.86 1 1 -github.com/go-errors/errors/parse_panic.go:36.86,38.5 1 1 -github.com/go-errors/errors/parse_panic.go:40.5,40.32 1 1 -github.com/go-errors/errors/parse_panic.go:40.32,41.18 1 1 -github.com/go-errors/errors/parse_panic.go:45.4,46.46 2 1 -github.com/go-errors/errors/parse_panic.go:51.4,53.23 2 1 -github.com/go-errors/errors/parse_panic.go:57.4,58.18 2 1 -github.com/go-errors/errors/parse_panic.go:62.4,63.17 2 1 -github.com/go-errors/errors/parse_panic.go:41.18,43.10 2 1 -github.com/go-errors/errors/parse_panic.go:46.46,49.5 2 1 -github.com/go-errors/errors/parse_panic.go:53.23,55.5 1 0 -github.com/go-errors/errors/parse_panic.go:58.18,60.5 1 0 -github.com/go-errors/errors/parse_panic.go:63.17,65.10 2 1 -github.com/go-errors/errors/parse_panic.go:70.43,72.3 1 1 -github.com/go-errors/errors/parse_panic.go:80.85,82.29 2 1 -github.com/go-errors/errors/parse_panic.go:85.2,85.15 1 1 -github.com/go-errors/errors/parse_panic.go:88.2,90.63 2 1 -github.com/go-errors/errors/parse_panic.go:94.2,94.53 1 1 -github.com/go-errors/errors/parse_panic.go:99.2,101.36 2 1 -github.com/go-errors/errors/parse_panic.go:105.2,106.15 2 1 -github.com/go-errors/errors/parse_panic.go:109.2,112.49 3 1 -github.com/go-errors/errors/parse_panic.go:116.2,117.16 2 1 -github.com/go-errors/errors/parse_panic.go:121.2,126.8 1 1 -github.com/go-errors/errors/parse_panic.go:82.29,84.3 1 0 -github.com/go-errors/errors/parse_panic.go:85.15,87.3 1 1 -github.com/go-errors/errors/parse_panic.go:90.63,93.3 2 1 -github.com/go-errors/errors/parse_panic.go:94.53,97.3 2 1 -github.com/go-errors/errors/parse_panic.go:101.36,103.3 1 0 -github.com/go-errors/errors/parse_panic.go:106.15,108.3 1 0 -github.com/go-errors/errors/parse_panic.go:112.49,114.3 1 1 -github.com/go-errors/errors/parse_panic.go:117.16,119.3 1 0 diff --git a/vendor/github.com/go-errors/errors/error.go b/vendor/github.com/go-errors/errors/error.go index 60062a4372..ccbc2e4272 100644 --- a/vendor/github.com/go-errors/errors/error.go +++ b/vendor/github.com/go-errors/errors/error.go @@ -91,6 +91,10 @@ func New(e interface{}) *Error { // fmt.Errorf("%v"). The skip parameter indicates how far up the stack // to start the stacktrace. 0 is from the current call, 1 from its caller, etc. func Wrap(e interface{}, skip int) *Error { + if e == nil { + return nil + } + var err error switch e := e.(type) { @@ -117,6 +121,9 @@ func Wrap(e interface{}, skip int) *Error { // up the stack to start the stacktrace. 0 is from the current call, // 1 from its caller, etc. func WrapPrefix(e interface{}, prefix string, skip int) *Error { + if e == nil { + return nil + } err := Wrap(e, 1+skip) @@ -132,26 +139,6 @@ func WrapPrefix(e interface{}, prefix string, skip int) *Error { } -// Is detects whether the error is equal to a given error. Errors -// are considered equal by this function if they are the same object, -// or if they both contain the same error inside an errors.Error. -func Is(e error, original error) bool { - - if e == original { - return true - } - - if e, ok := e.(*Error); ok { - return Is(e.Err, original) - } - - if original, ok := original.(*Error); ok { - return Is(e, original.Err) - } - - return false -} - // Errorf creates a new error with the given message. You can use it // as a drop-in replacement for fmt.Errorf() to provide descriptive // errors in return values. 
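A minimal sketch of the behavior these hunks introduce: Wrap now tolerates nil input, and on Go 1.13+ Is defers to the standard library, so sentinel comparisons keep working through a wrapped *Error (the file name below is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-errors/errors"
)

func main() {
	// Wrap now returns nil for a nil input, so callers no longer need a guard.
	fmt.Println(errors.Wrap(nil, 0) == nil) // true

	// Is unwraps the *Error and falls back to the standard library's matching,
	// so the os sentinel is still found behind the stack-trace wrapper.
	_, err := os.Open("no-such-file.txt")
	wrapped := errors.Wrap(err, 0)
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}
```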
@@ -215,3 +202,8 @@ func (err *Error) TypeName() string { } return reflect.TypeOf(err.Err).String() } + +// Return the wrapped error (implements api for As function). +func (err *Error) Unwrap() error { + return err.Err +} diff --git a/vendor/github.com/go-errors/errors/error_1_13.go b/vendor/github.com/go-errors/errors/error_1_13.go new file mode 100644 index 0000000000..0af2fc8065 --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_1_13.go @@ -0,0 +1,31 @@ +// +build go1.13 + +package errors + +import ( + baseErrors "errors" +) + +// find error in any wrapped error +func As(err error, target interface{}) bool { + return baseErrors.As(err, target) +} + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are matched by errors.Is +// or if their contained errors are matched through errors.Is +func Is(e error, original error) bool { + if baseErrors.Is(e, original) { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/error_backward.go b/vendor/github.com/go-errors/errors/error_backward.go new file mode 100644 index 0000000000..80b0695e7e --- /dev/null +++ b/vendor/github.com/go-errors/errors/error_backward.go @@ -0,0 +1,57 @@ +// +build !go1.13 + +package errors + +import ( + "reflect" +) + +type unwrapper interface { + Unwrap() error +} + +// As assigns error or any wrapped error to the value target points +// to. If there is no value of the target type of target As returns +// false. +func As(err error, target interface{}) bool { + targetType := reflect.TypeOf(target) + + for { + errType := reflect.TypeOf(err) + + if errType == nil { + return false + } + + if reflect.PtrTo(errType) == targetType { + reflect.ValueOf(target).Elem().Set(reflect.ValueOf(err)) + return true + } + + wrapped, ok := err.(unwrapper) + if ok { + err = wrapped.Unwrap() + } else { + return false + } + } +} + +// Is detects whether the error is equal to a given error. Errors +// are considered equal by this function if they are the same object, +// or if they both contain the same error inside an errors.Error. +func Is(e error, original error) bool { + if e == original { + return true + } + + if e, ok := e.(*Error); ok { + return Is(e.Err, original) + } + + if original, ok := original.(*Error); ok { + return Is(e, original.Err) + } + + return false +} diff --git a/vendor/github.com/go-errors/errors/stackframe.go b/vendor/github.com/go-errors/errors/stackframe.go index 750ab9a521..ef4a8b3f3b 100644 --- a/vendor/github.com/go-errors/errors/stackframe.go +++ b/vendor/github.com/go-errors/errors/stackframe.go @@ -1,9 +1,10 @@ package errors import ( + "bufio" "bytes" "fmt" - "io/ioutil" + "os" "runtime" "strings" ) @@ -52,7 +53,7 @@ func (frame *StackFrame) Func() *runtime.Func { func (frame *StackFrame) String() string { str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) - source, err := frame.SourceLine() + source, err := frame.sourceLine() if err != nil { return str } @@ -62,18 +63,37 @@ func (frame *StackFrame) String() string { // SourceLine gets the line of code (from File and Line) of the original source if possible. 
func (frame *StackFrame) SourceLine() (string, error) { - data, err := ioutil.ReadFile(frame.File) - + source, err := frame.sourceLine() if err != nil { - return "", New(err) + return source, New(err) } + return source, err +} - lines := bytes.Split(data, []byte{'\n'}) - if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) { +func (frame *StackFrame) sourceLine() (string, error) { + if frame.LineNumber <= 0 { return "???", nil } - // -1 because line-numbers are 1 based, but our array is 0 based - return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil + + file, err := os.Open(frame.File) + if err != nil { + return "", err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + currentLine := 1 + for scanner.Scan() { + if currentLine == frame.LineNumber { + return string(bytes.Trim(scanner.Bytes(), " \t")), nil + } + currentLine++ + } + if err := scanner.Err(); err != nil { + return "", err + } + + return "???", nil } func packageAndName(fn *runtime.Func) (string, string) { diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/go-redis/cache/v9/.prettierrc.yml similarity index 100% rename from vendor/github.com/go-redis/redis/v8/.prettierrc.yml rename to vendor/github.com/go-redis/cache/v9/.prettierrc.yml diff --git a/vendor/github.com/go-redis/cache/v8/CHANGELOG.md b/vendor/github.com/go-redis/cache/v9/CHANGELOG.md similarity index 100% rename from vendor/github.com/go-redis/cache/v8/CHANGELOG.md rename to vendor/github.com/go-redis/cache/v9/CHANGELOG.md diff --git a/vendor/github.com/go-redis/cache/v8/LICENSE b/vendor/github.com/go-redis/cache/v9/LICENSE similarity index 100% rename from vendor/github.com/go-redis/cache/v8/LICENSE rename to vendor/github.com/go-redis/cache/v9/LICENSE diff --git a/vendor/github.com/go-redis/cache/v8/Makefile b/vendor/github.com/go-redis/cache/v9/Makefile similarity index 100% rename from vendor/github.com/go-redis/cache/v8/Makefile rename to vendor/github.com/go-redis/cache/v9/Makefile diff --git a/vendor/github.com/go-redis/cache/v8/README.md b/vendor/github.com/go-redis/cache/v9/README.md similarity index 67% rename from vendor/github.com/go-redis/cache/v8/README.md rename to vendor/github.com/go-redis/cache/v9/README.md index 8850fd140a..579c390e95 100644 --- a/vendor/github.com/go-redis/cache/v8/README.md +++ b/vendor/github.com/go-redis/cache/v9/README.md @@ -1,12 +1,21 @@ # Redis cache library for Golang [![Build Status](https://travis-ci.org/go-redis/cache.svg)](https://travis-ci.org/go-redis/cache) -[![GoDoc](https://godoc.org/github.com/go-redis/cache?status.svg)](https://pkg.go.dev/github.com/go-redis/cache/v8?tab=doc) +[![GoDoc](https://godoc.org/github.com/go-redis/cache?status.svg)](https://pkg.go.dev/github.com/go-redis/cache/v9?tab=doc) -go-redis/cache library implements a cache using Redis as a -key/value storage. It uses [MessagePack](https://github.com/vmihailenco/msgpack) to marshal values. +> go-redis/cache is brought to you by :star: +> [**uptrace/uptrace**](https://github.com/uptrace/uptrace). Uptrace is an open source and blazingly +> fast [distributed tracing tool](https://get.uptrace.dev/) powered by OpenTelemetry and ClickHouse. +> Give it a star as well! -Optinally you can use [TinyLFU](https://github.com/dgryski/go-tinylfu) or any other [cache algorithm](https://github.com/vmihailenco/go-cache-benchmark) as a local in-process cache. +go-redis/cache library implements a cache using Redis as a key/value storage. 
It uses +[MessagePack](https://github.com/vmihailenco/msgpack) to marshal values. + +Optionally, you can use [TinyLFU](https://github.com/dgryski/go-tinylfu) or any other +[cache algorithm](https://github.com/vmihailenco/go-cache-benchmark) as a local in-process cache. + +If you are interested in monitoring cache hit rate, see the guide for +[Monitoring using OpenTelemetry Metrics](https://blog.uptrace.dev/posts/opentelemetry-metrics-cache-stats/). ## Installation @@ -18,10 +27,10 @@ module: go mod init github.com/my/repo ``` -And then install go-redis/cache/v8 (note _v8_ in the import; omitting it is a popular mistake): +And then install go-redis/cache/v9 (note _v9_ in the import; omitting it is a popular mistake): ```shell -go get github.com/go-redis/cache/v8 +go get github.com/go-redis/cache/v9 ``` ## Quickstart @@ -34,8 +43,8 @@ import ( "fmt" "time" - "github.com/go-redis/redis/v8" - "github.com/go-redis/cache/v8" + "github.com/redis/go-redis/v9" + "github.com/go-redis/cache/v9" ) type Object struct { diff --git a/vendor/github.com/go-redis/cache/v8/cache.go b/vendor/github.com/go-redis/cache/v9/cache.go similarity index 98% rename from vendor/github.com/go-redis/cache/v8/cache.go rename to vendor/github.com/go-redis/cache/v9/cache.go index ca6539a889..5410959e38 100644 --- a/vendor/github.com/go-redis/cache/v8/cache.go +++ b/vendor/github.com/go-redis/cache/v9/cache.go @@ -8,8 +8,8 @@ import ( "sync/atomic" "time" - "github.com/go-redis/redis/v8" "github.com/klauspost/compress/s2" + "github.com/redis/go-redis/v9" "github.com/vmihailenco/msgpack/v5" "golang.org/x/sync/singleflight" ) @@ -185,7 +185,8 @@ func (cd *Cache) set(item *Item) ([]byte, bool, error) { // Exists reports whether value for the given key exists. func (cd *Cache) Exists(ctx context.Context, key string) bool { - return cd.Get(ctx, key, nil) == nil + _, err := cd.getBytes(ctx, key, false) + return err == nil } // Get gets the value for the given key. 
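A minimal sketch of the renamed v9 import paths in use, mirroring the Quickstart above (the localhost address is illustrative and assumes a reachable Redis):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/cache/v9"
	"github.com/redis/go-redis/v9"
)

type Object struct {
	Str string
	Num int
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	mycache := cache.New(&cache.Options{
		Redis:      rdb,
		LocalCache: cache.NewTinyLFU(1000, time.Minute),
	})

	if err := mycache.Set(&cache.Item{
		Ctx:   ctx,
		Key:   "mykey",
		Value: &Object{Str: "mystring", Num: 42},
		TTL:   time.Hour,
	}); err != nil {
		panic(err)
	}

	var obj Object
	if err := mycache.Get(ctx, "mykey", &obj); err == nil {
		fmt.Println(obj.Str, obj.Num)
	}

	// Exists now checks for the key without unmarshalling the cached value.
	fmt.Println(mycache.Exists(ctx, "mykey"))
}
```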
diff --git a/vendor/github.com/go-redis/cache/v8/local.go b/vendor/github.com/go-redis/cache/v9/local.go similarity index 93% rename from vendor/github.com/go-redis/cache/v8/local.go rename to vendor/github.com/go-redis/cache/v9/local.go index 751feb41eb..81f5097824 100644 --- a/vendor/github.com/go-redis/cache/v8/local.go +++ b/vendor/github.com/go-redis/cache/v9/local.go @@ -1,11 +1,11 @@ package cache import ( + "math/rand" "sync" "time" "github.com/vmihailenco/go-tinylfu" - "golang.org/x/exp/rand" ) type LocalCache interface { @@ -33,7 +33,7 @@ func NewTinyLFU(size int, ttl time.Duration) *TinyLFU { } return &TinyLFU{ - rand: rand.New(rand.NewSource(uint64(time.Now().UnixNano()))), + rand: rand.New(rand.NewSource(time.Now().UnixNano())), lfu: tinylfu.New(size, 100000), ttl: ttl, offset: offset, diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md deleted file mode 100644 index 195e519338..0000000000 --- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md +++ /dev/null @@ -1,177 +0,0 @@ -## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17) - - -### Bug Fixes - -* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a)) -* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c)) -* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475)) -* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2)) -* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32)) -* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4)) -* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc)) -* set timeout for WAIT command. 
Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f)) -* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2)) - - -### Features - -* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8)) -* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e)) -* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7)) -* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e)) -* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b)) -* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417)) -* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d)) - - - -## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04) - - -### Features - -* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634)) -* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24)) -* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4)) - - - -## v8.11 - -- Remove OpenTelemetry metrics. -- Supports more redis commands and options. - -## v8.10 - -- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a - single span with a Redis command (instead of 4 spans). There are multiple reasons behind this - decision: - - - Traces become smaller and less noisy. - - It may be costly to process those 3 extra spans for each query. - - go-redis no longer depends on OpenTelemetry. - - Eventually we hope to replace the information that we no longer collect with OpenTelemetry - Metrics. - -## v8.9 - -- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`, - `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings. - -## v8.8 - -- To make updating easier, extra modules now have the same version as go-redis does. That means that - you need to update your imports: - -``` -github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8 -github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8 -``` - -## v8.5 - -- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a - struct: - -```go -err := rdb.HGetAll(ctx, "hash").Scan(&data) - -err := rdb.MGet(ctx, "key1", "key2").Scan(&data) -``` - -- Please check [redismock](https://github.com/go-redis/redismock) by - [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client. 
- -## v8 - -- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not - using `context.Context` yet, the simplest option is to define global package variable - `var ctx = context.TODO()` and use it when `ctx` is required. - -- Full support for `context.Context` canceling. - -- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node. - -- Added `redisext.OpenTemetryHook` that adds - [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/). - -- Redis slow log support. - -- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move - existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme: - -```go -import "github.com/golang/groupcache/consistenthash" - -ring := redis.NewRing(&redis.RingOptions{ - NewConsistentHash: func() { - return consistenthash.New(100, crc32.ChecksumIEEE) - }, -}) -``` - -- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3. -- `Options.MaxRetries` default value is changed from 0 to 3. - -- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`. - -## v7.3 - -- New option `Options.Username` which causes client to use `AuthACL`. Be aware if your connection - URL contains username. - -## v7.2 - -- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users. - -## v7.1 - -- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer` - interface. - -## v7 - -- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a - transactional pipeline. -- WrapProcess is replaced with more convenient AddHook that has access to context.Context. -- WithContext now can not be used to create a shallow copy of the client. -- New methods ProcessContext, DoContext, and ExecContext. -- Client respects Context.Deadline when setting net.Conn deadline. -- Client listens on Context.Done while waiting for a connection from the pool and returns an error - when context context is cancelled. -- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow - detecting reconnections. -- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse - the time. -- `SetLimiter` is removed and added `Options.Limiter` instead. -- `HMSet` is deprecated as of Redis v4. - -## v6.15 - -- Cluster and Ring pipelines process commands for each node in its own goroutine. - -## 6.14 - -- Added Options.MinIdleConns. -- Added Options.MaxConnAge. -- PoolStats.FreeConns is renamed to PoolStats.IdleConns. -- Add Client.Do to simplify creating custom commands. -- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. -- Lower memory usage. - -## v6.13 - -- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set - `HashReplicas = 1000` for better keys distribution between shards. -- Cluster client was optimized to use much less memory when reloading cluster state. -- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout - occurres. In most cases it is recommended to use PubSub.Channel instead. -- Dialer.KeepAlive is set to 5 minutes by default. - -## v6.12 - -- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis - Servers that don't have cluster mode enabled. 
See - https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile deleted file mode 100644 index a4cfe0576e..0000000000 --- a/vendor/github.com/go-redis/redis/v8/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort) - -test: testdeps - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - go vet - -testdeps: testdata/redis/src/redis-server - -bench: testdeps - go test ./... -test.run=NONE -test.bench=. -test.benchmem - -.PHONY: all test testdeps bench - -testdata/redis: - mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@ - -testdata/redis/src/redis-server: testdata/redis - cd $< && make all - -fmt: - gofmt -w -s ./ - goimports -w -local github.com/go-redis/redis ./ - -go_mod_tidy: - go get -u && go mod tidy - set -e; for dir in $(PACKAGE_DIRS); do \ - echo "go mod tidy in $${dir}"; \ - (cd "$${dir}" && \ - go get -u && \ - go mod tidy); \ - done diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go deleted file mode 100644 index 4bb12a85be..0000000000 --- a/vendor/github.com/go-redis/redis/v8/command.go +++ /dev/null @@ -1,3478 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "net" - "strconv" - "time" - - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hscan" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/util" -) - -type Cmder interface { - Name() string - FullName() string - Args() []interface{} - String() string - stringArg(int) string - firstKeyPos() int8 - SetFirstKeyPos(int8) - - readTimeout() *time.Duration - readReply(rd *proto.Reader) error - - SetErr(error) - Err() error -} - -func setCmdsErr(cmds []Cmder, e error) { - for _, cmd := range cmds { - if cmd.Err() == nil { - cmd.SetErr(e) - } - } -} - -func cmdsFirstErr(cmds []Cmder) error { - for _, cmd := range cmds { - if err := cmd.Err(); err != nil { - return err - } - } - return nil -} - -func writeCmds(wr *proto.Writer, cmds []Cmder) error { - for _, cmd := range cmds { - if err := writeCmd(wr, cmd); err != nil { - return err - } - } - return nil -} - -func writeCmd(wr *proto.Writer, cmd Cmder) error { - return wr.WriteArgs(cmd.Args()) -} - -func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { - if pos := cmd.firstKeyPos(); pos != 0 { - return int(pos) - } - - switch cmd.Name() { - case "eval", "evalsha": - if cmd.stringArg(2) != "0" { - return 3 - } - - return 0 - case "publish": - return 1 - case "memory": - // https://github.com/redis/redis/issues/7493 - if cmd.stringArg(1) == "usage" { - return 2 - } - } - - if info != nil { - return int(info.FirstKeyPos) - } - return 0 -} - -func cmdString(cmd Cmder, val interface{}) string { - b := make([]byte, 0, 64) - - for i, arg := range cmd.Args() { - if i > 0 { - b = append(b, ' ') - } - b = internal.AppendArg(b, arg) - } - - if err := cmd.Err(); err != nil { - b = append(b, ": "...) - b = append(b, err.Error()...) - } else if val != nil { - b = append(b, ": "...) 
- b = internal.AppendArg(b, val) - } - - return internal.String(b) -} - -//------------------------------------------------------------------------------ - -type baseCmd struct { - ctx context.Context - args []interface{} - err error - keyPos int8 - - _readTimeout *time.Duration -} - -var _ Cmder = (*Cmd)(nil) - -func (cmd *baseCmd) Name() string { - if len(cmd.args) == 0 { - return "" - } - // Cmd name must be lower cased. - return internal.ToLower(cmd.stringArg(0)) -} - -func (cmd *baseCmd) FullName() string { - switch name := cmd.Name(); name { - case "cluster", "command": - if len(cmd.args) == 1 { - return name - } - if s2, ok := cmd.args[1].(string); ok { - return name + " " + s2 - } - return name - default: - return name - } -} - -func (cmd *baseCmd) Args() []interface{} { - return cmd.args -} - -func (cmd *baseCmd) stringArg(pos int) string { - if pos < 0 || pos >= len(cmd.args) { - return "" - } - arg := cmd.args[pos] - switch v := arg.(type) { - case string: - return v - default: - // TODO: consider using appendArg - return fmt.Sprint(v) - } -} - -func (cmd *baseCmd) firstKeyPos() int8 { - return cmd.keyPos -} - -func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { - cmd.keyPos = keyPos -} - -func (cmd *baseCmd) SetErr(e error) { - cmd.err = e -} - -func (cmd *baseCmd) Err() error { - return cmd.err -} - -func (cmd *baseCmd) readTimeout() *time.Duration { - return cmd._readTimeout -} - -func (cmd *baseCmd) setReadTimeout(d time.Duration) { - cmd._readTimeout = &d -} - -//------------------------------------------------------------------------------ - -type Cmd struct { - baseCmd - - val interface{} -} - -func NewCmd(ctx context.Context, args ...interface{}) *Cmd { - return &Cmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *Cmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *Cmd) SetVal(val interface{}) { - cmd.val = val -} - -func (cmd *Cmd) Val() interface{} { - return cmd.val -} - -func (cmd *Cmd) Result() (interface{}, error) { - return cmd.val, cmd.err -} - -func (cmd *Cmd) Text() (string, error) { - if cmd.err != nil { - return "", cmd.err - } - return toString(cmd.val) -} - -func toString(val interface{}) (string, error) { - switch val := val.(type) { - case string: - return val, nil - default: - err := fmt.Errorf("redis: unexpected type=%T for String", val) - return "", err - } -} - -func (cmd *Cmd) Int() (int, error) { - if cmd.err != nil { - return 0, cmd.err - } - switch val := cmd.val.(type) { - case int64: - return int(val), nil - case string: - return strconv.Atoi(val) - default: - err := fmt.Errorf("redis: unexpected type=%T for Int", val) - return 0, err - } -} - -func (cmd *Cmd) Int64() (int64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return toInt64(cmd.val) -} - -func toInt64(val interface{}) (int64, error) { - switch val := val.(type) { - case int64: - return val, nil - case string: - return strconv.ParseInt(val, 10, 64) - default: - err := fmt.Errorf("redis: unexpected type=%T for Int64", val) - return 0, err - } -} - -func (cmd *Cmd) Uint64() (uint64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return toUint64(cmd.val) -} - -func toUint64(val interface{}) (uint64, error) { - switch val := val.(type) { - case int64: - return uint64(val), nil - case string: - return strconv.ParseUint(val, 10, 64) - default: - err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) - return 0, err - } -} - -func (cmd *Cmd) Float32() (float32, error) { - if cmd.err != nil { - return 0, cmd.err - } - 
return toFloat32(cmd.val) -} - -func toFloat32(val interface{}) (float32, error) { - switch val := val.(type) { - case int64: - return float32(val), nil - case string: - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil - default: - err := fmt.Errorf("redis: unexpected type=%T for Float32", val) - return 0, err - } -} - -func (cmd *Cmd) Float64() (float64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return toFloat64(cmd.val) -} - -func toFloat64(val interface{}) (float64, error) { - switch val := val.(type) { - case int64: - return float64(val), nil - case string: - return strconv.ParseFloat(val, 64) - default: - err := fmt.Errorf("redis: unexpected type=%T for Float64", val) - return 0, err - } -} - -func (cmd *Cmd) Bool() (bool, error) { - if cmd.err != nil { - return false, cmd.err - } - return toBool(cmd.val) -} - -func toBool(val interface{}) (bool, error) { - switch val := val.(type) { - case int64: - return val != 0, nil - case string: - return strconv.ParseBool(val) - default: - err := fmt.Errorf("redis: unexpected type=%T for Bool", val) - return false, err - } -} - -func (cmd *Cmd) Slice() ([]interface{}, error) { - if cmd.err != nil { - return nil, cmd.err - } - switch val := cmd.val.(type) { - case []interface{}: - return val, nil - default: - return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) - } -} - -func (cmd *Cmd) StringSlice() ([]string, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - ss := make([]string, len(slice)) - for i, iface := range slice { - val, err := toString(iface) - if err != nil { - return nil, err - } - ss[i] = val - } - return ss, nil -} - -func (cmd *Cmd) Int64Slice() ([]int64, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - nums := make([]int64, len(slice)) - for i, iface := range slice { - val, err := toInt64(iface) - if err != nil { - return nil, err - } - nums[i] = val - } - return nums, nil -} - -func (cmd *Cmd) Uint64Slice() ([]uint64, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - nums := make([]uint64, len(slice)) - for i, iface := range slice { - val, err := toUint64(iface) - if err != nil { - return nil, err - } - nums[i] = val - } - return nums, nil -} - -func (cmd *Cmd) Float32Slice() ([]float32, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - floats := make([]float32, len(slice)) - for i, iface := range slice { - val, err := toFloat32(iface) - if err != nil { - return nil, err - } - floats[i] = val - } - return floats, nil -} - -func (cmd *Cmd) Float64Slice() ([]float64, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - floats := make([]float64, len(slice)) - for i, iface := range slice { - val, err := toFloat64(iface) - if err != nil { - return nil, err - } - floats[i] = val - } - return floats, nil -} - -func (cmd *Cmd) BoolSlice() ([]bool, error) { - slice, err := cmd.Slice() - if err != nil { - return nil, err - } - - bools := make([]bool, len(slice)) - for i, iface := range slice { - val, err := toBool(iface) - if err != nil { - return nil, err - } - bools[i] = val - } - return bools, nil -} - -func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadReply(sliceParser) - return err -} - -// sliceParser implements proto.MultiBulkParse. 
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { - vals := make([]interface{}, n) - for i := 0; i < len(vals); i++ { - v, err := rd.ReadReply(sliceParser) - if err != nil { - if err == Nil { - vals[i] = nil - continue - } - if err, ok := err.(proto.RedisError); ok { - vals[i] = err - continue - } - return nil, err - } - vals[i] = v - } - return vals, nil -} - -//------------------------------------------------------------------------------ - -type SliceCmd struct { - baseCmd - - val []interface{} -} - -var _ Cmder = (*SliceCmd)(nil) - -func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { - return &SliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *SliceCmd) SetVal(val []interface{}) { - cmd.val = val -} - -func (cmd *SliceCmd) Val() []interface{} { - return cmd.val -} - -func (cmd *SliceCmd) Result() ([]interface{}, error) { - return cmd.val, cmd.err -} - -func (cmd *SliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -// Scan scans the results from the map into a destination struct. The map keys -// are matched in the Redis struct fields by the `redis:"field"` tag. -func (cmd *SliceCmd) Scan(dst interface{}) error { - if cmd.err != nil { - return cmd.err - } - - // Pass the list of keys and values. - // Skip the first two args for: HMGET key - var args []interface{} - if cmd.args[0] == "hmget" { - args = cmd.args[2:] - } else { - // Otherwise, it's: MGET field field ... - args = cmd.args[1:] - } - - return hscan.Scan(dst, args, cmd.val) -} - -func (cmd *SliceCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadArrayReply(sliceParser) - if err != nil { - return err - } - cmd.val = v.([]interface{}) - return nil -} - -//------------------------------------------------------------------------------ - -type StatusCmd struct { - baseCmd - - val string -} - -var _ Cmder = (*StatusCmd)(nil) - -func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { - return &StatusCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StatusCmd) SetVal(val string) { - cmd.val = val -} - -func (cmd *StatusCmd) Val() string { - return cmd.val -} - -func (cmd *StatusCmd) Result() (string, error) { - return cmd.val, cmd.err -} - -func (cmd *StatusCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadString() - return err -} - -//------------------------------------------------------------------------------ - -type IntCmd struct { - baseCmd - - val int64 -} - -var _ Cmder = (*IntCmd)(nil) - -func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { - return &IntCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *IntCmd) SetVal(val int64) { - cmd.val = val -} - -func (cmd *IntCmd) Val() int64 { - return cmd.val -} - -func (cmd *IntCmd) Result() (int64, error) { - return cmd.val, cmd.err -} - -func (cmd *IntCmd) Uint64() (uint64, error) { - return uint64(cmd.val), cmd.err -} - -func (cmd *IntCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadIntReply() - return err -} - -//------------------------------------------------------------------------------ - -type IntSliceCmd struct { - baseCmd - - val []int64 -} - -var _ Cmder = (*IntSliceCmd)(nil) - -func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { - return &IntSliceCmd{ - baseCmd: baseCmd{ - ctx: 
ctx, - args: args, - }, - } -} - -func (cmd *IntSliceCmd) SetVal(val []int64) { - cmd.val = val -} - -func (cmd *IntSliceCmd) Val() []int64 { - return cmd.val -} - -func (cmd *IntSliceCmd) Result() ([]int64, error) { - return cmd.val, cmd.err -} - -func (cmd *IntSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]int64, n) - for i := 0; i < len(cmd.val); i++ { - num, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.val[i] = num - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type DurationCmd struct { - baseCmd - - val time.Duration - precision time.Duration -} - -var _ Cmder = (*DurationCmd)(nil) - -func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { - return &DurationCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - precision: precision, - } -} - -func (cmd *DurationCmd) SetVal(val time.Duration) { - cmd.val = val -} - -func (cmd *DurationCmd) Val() time.Duration { - return cmd.val -} - -func (cmd *DurationCmd) Result() (time.Duration, error) { - return cmd.val, cmd.err -} - -func (cmd *DurationCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *DurationCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadIntReply() - if err != nil { - return err - } - switch n { - // -2 if the key does not exist - // -1 if the key exists but has no associated expire - case -2, -1: - cmd.val = time.Duration(n) - default: - cmd.val = time.Duration(n) * cmd.precision - } - return nil -} - -//------------------------------------------------------------------------------ - -type TimeCmd struct { - baseCmd - - val time.Time -} - -var _ Cmder = (*TimeCmd)(nil) - -func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { - return &TimeCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *TimeCmd) SetVal(val time.Time) { - cmd.val = val -} - -func (cmd *TimeCmd) Val() time.Time { - return cmd.val -} - -func (cmd *TimeCmd) Result() (time.Time, error) { - return cmd.val, cmd.err -} - -func (cmd *TimeCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *TimeCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d elements, expected 2", n) - } - - sec, err := rd.ReadInt() - if err != nil { - return nil, err - } - - microsec, err := rd.ReadInt() - if err != nil { - return nil, err - } - - cmd.val = time.Unix(sec, microsec*1000) - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type BoolCmd struct { - baseCmd - - val bool -} - -var _ Cmder = (*BoolCmd)(nil) - -func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { - return &BoolCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *BoolCmd) SetVal(val bool) { - cmd.val = val -} - -func (cmd *BoolCmd) Val() bool { - return cmd.val -} - -func (cmd *BoolCmd) Result() (bool, error) { - return cmd.val, cmd.err -} - -func (cmd *BoolCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *BoolCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadReply(nil) - // `SET key value NX` returns nil when key already exists. 
But - // `SETNX key value` returns bool (0/1). So convert nil to bool. - if err == Nil { - cmd.val = false - return nil - } - if err != nil { - return err - } - switch v := v.(type) { - case int64: - cmd.val = v == 1 - return nil - case string: - cmd.val = v == "OK" - return nil - default: - return fmt.Errorf("got %T, wanted int64 or string", v) - } -} - -//------------------------------------------------------------------------------ - -type StringCmd struct { - baseCmd - - val string -} - -var _ Cmder = (*StringCmd)(nil) - -func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { - return &StringCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringCmd) SetVal(val string) { - cmd.val = val -} - -func (cmd *StringCmd) Val() string { - return cmd.val -} - -func (cmd *StringCmd) Result() (string, error) { - return cmd.Val(), cmd.err -} - -func (cmd *StringCmd) Bytes() ([]byte, error) { - return util.StringToBytes(cmd.val), cmd.err -} - -func (cmd *StringCmd) Bool() (bool, error) { - if cmd.err != nil { - return false, cmd.err - } - return strconv.ParseBool(cmd.val) -} - -func (cmd *StringCmd) Int() (int, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.Atoi(cmd.Val()) -} - -func (cmd *StringCmd) Int64() (int64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseInt(cmd.Val(), 10, 64) -} - -func (cmd *StringCmd) Uint64() (uint64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseUint(cmd.Val(), 10, 64) -} - -func (cmd *StringCmd) Float32() (float32, error) { - if cmd.err != nil { - return 0, cmd.err - } - f, err := strconv.ParseFloat(cmd.Val(), 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -func (cmd *StringCmd) Float64() (float64, error) { - if cmd.err != nil { - return 0, cmd.err - } - return strconv.ParseFloat(cmd.Val(), 64) -} - -func (cmd *StringCmd) Time() (time.Time, error) { - if cmd.err != nil { - return time.Time{}, cmd.err - } - return time.Parse(time.RFC3339Nano, cmd.Val()) -} - -func (cmd *StringCmd) Scan(val interface{}) error { - if cmd.err != nil { - return cmd.err - } - return proto.Scan([]byte(cmd.val), val) -} - -func (cmd *StringCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadString() - return err -} - -//------------------------------------------------------------------------------ - -type FloatCmd struct { - baseCmd - - val float64 -} - -var _ Cmder = (*FloatCmd)(nil) - -func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { - return &FloatCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *FloatCmd) SetVal(val float64) { - cmd.val = val -} - -func (cmd *FloatCmd) Val() float64 { - return cmd.val -} - -func (cmd *FloatCmd) Result() (float64, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *FloatCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { - cmd.val, err = rd.ReadFloatReply() - return err -} - -//------------------------------------------------------------------------------ - -type FloatSliceCmd struct { - baseCmd - - val []float64 -} - -var _ Cmder = (*FloatSliceCmd)(nil) - -func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { - return &FloatSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *FloatSliceCmd) SetVal(val []float64) { - cmd.val = 
val -} - -func (cmd *FloatSliceCmd) Val() []float64 { - return cmd.val -} - -func (cmd *FloatSliceCmd) Result() ([]float64, error) { - return cmd.val, cmd.err -} - -func (cmd *FloatSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]float64, n) - for i := 0; i < len(cmd.val); i++ { - switch num, err := rd.ReadFloatReply(); { - case err == Nil: - cmd.val[i] = 0 - case err != nil: - return nil, err - default: - cmd.val[i] = num - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringSliceCmd struct { - baseCmd - - val []string -} - -var _ Cmder = (*StringSliceCmd)(nil) - -func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { - return &StringSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringSliceCmd) SetVal(val []string) { - cmd.val = val -} - -func (cmd *StringSliceCmd) Val() []string { - return cmd.val -} - -func (cmd *StringSliceCmd) Result() ([]string, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *StringSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { - return proto.ScanSlice(cmd.Val(), container) -} - -func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]string, n) - for i := 0; i < len(cmd.val); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.val[i] = "" - case err != nil: - return nil, err - default: - cmd.val[i] = s - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type BoolSliceCmd struct { - baseCmd - - val []bool -} - -var _ Cmder = (*BoolSliceCmd)(nil) - -func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { - return &BoolSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *BoolSliceCmd) SetVal(val []bool) { - cmd.val = val -} - -func (cmd *BoolSliceCmd) Val() []bool { - return cmd.val -} - -func (cmd *BoolSliceCmd) Result() ([]bool, error) { - return cmd.val, cmd.err -} - -func (cmd *BoolSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]bool, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.val[i] = n == 1 - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringStringMapCmd struct { - baseCmd - - val map[string]string -} - -var _ Cmder = (*StringStringMapCmd)(nil) - -func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd { - return &StringStringMapCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringStringMapCmd) SetVal(val map[string]string) { - cmd.val = val -} - -func (cmd *StringStringMapCmd) Val() map[string]string { - return cmd.val -} - -func (cmd *StringStringMapCmd) Result() (map[string]string, error) { - return cmd.val, cmd.err -} - -func (cmd *StringStringMapCmd) String() string { - 
return cmdString(cmd, cmd.val) -} - -// Scan scans the results from the map into a destination struct. The map keys -// are matched in the Redis struct fields by the `redis:"field"` tag. -func (cmd *StringStringMapCmd) Scan(dest interface{}) error { - if cmd.err != nil { - return cmd.err - } - - strct, err := hscan.Struct(dest) - if err != nil { - return err - } - - for k, v := range cmd.val { - if err := strct.Scan(k, v); err != nil { - return err - } - } - - return nil -} - -func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]string, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - value, err := rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val[key] = value - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringIntMapCmd struct { - baseCmd - - val map[string]int64 -} - -var _ Cmder = (*StringIntMapCmd)(nil) - -func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { - return &StringIntMapCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringIntMapCmd) SetVal(val map[string]int64) { - cmd.val = val -} - -func (cmd *StringIntMapCmd) Val() map[string]int64 { - return cmd.val -} - -func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { - return cmd.val, cmd.err -} - -func (cmd *StringIntMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]int64, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - n, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - cmd.val[key] = n - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type StringStructMapCmd struct { - baseCmd - - val map[string]struct{} -} - -var _ Cmder = (*StringStructMapCmd)(nil) - -func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { - return &StringStructMapCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { - cmd.val = val -} - -func (cmd *StringStructMapCmd) Val() map[string]struct{} { - return cmd.val -} - -func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { - return cmd.val, cmd.err -} - -func (cmd *StringStructMapCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]struct{}, n) - for i := int64(0); i < n; i++ { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - cmd.val[key] = struct{}{} - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XMessage struct { - ID string - Values map[string]interface{} -} - -type XMessageSliceCmd struct { - baseCmd - - val []XMessage -} - -var _ Cmder = (*XMessageSliceCmd)(nil) - -func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { - return 
&XMessageSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { - cmd.val = val -} - -func (cmd *XMessageSliceCmd) Val() []XMessage { - return cmd.val -} - -func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { - return cmd.val, cmd.err -} - -func (cmd *XMessageSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { - var err error - cmd.val, err = readXMessageSlice(rd) - return err -} - -func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - msgs := make([]XMessage, n) - for i := 0; i < n; i++ { - var err error - msgs[i], err = readXMessage(rd) - if err != nil { - return nil, err - } - } - return msgs, nil -} - -func readXMessage(rd *proto.Reader) (XMessage, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return XMessage{}, err - } - if n != 2 { - return XMessage{}, fmt.Errorf("got %d, wanted 2", n) - } - - id, err := rd.ReadString() - if err != nil { - return XMessage{}, err - } - - var values map[string]interface{} - - v, err := rd.ReadArrayReply(stringInterfaceMapParser) - if err != nil { - if err != proto.Nil { - return XMessage{}, err - } - } else { - values = v.(map[string]interface{}) - } - - return XMessage{ - ID: id, - Values: values, - }, nil -} - -// stringInterfaceMapParser implements proto.MultiBulkParse. -func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { - m := make(map[string]interface{}, n/2) - for i := int64(0); i < n; i += 2 { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - value, err := rd.ReadString() - if err != nil { - return nil, err - } - - m[key] = value - } - return m, nil -} - -//------------------------------------------------------------------------------ - -type XStream struct { - Stream string - Messages []XMessage -} - -type XStreamSliceCmd struct { - baseCmd - - val []XStream -} - -var _ Cmder = (*XStreamSliceCmd)(nil) - -func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { - return &XStreamSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XStreamSliceCmd) SetVal(val []XStream) { - cmd.val = val -} - -func (cmd *XStreamSliceCmd) Val() []XStream { - return cmd.val -} - -func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { - return cmd.val, cmd.err -} - -func (cmd *XStreamSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]XStream, n) - for i := 0; i < len(cmd.val); i++ { - i := i - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - - stream, err := rd.ReadString() - if err != nil { - return nil, err - } - - msgs, err := readXMessageSlice(rd) - if err != nil { - return nil, err - } - - cmd.val[i] = XStream{ - Stream: stream, - Messages: msgs, - } - return nil, nil - }) - if err != nil { - return nil, err - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XPending struct { - Count int64 - Lower string - Higher string - Consumers map[string]int64 -} - -type XPendingCmd struct { - baseCmd - val *XPending -} - -var _ Cmder = 
(*XPendingCmd)(nil) - -func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { - return &XPendingCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XPendingCmd) SetVal(val *XPending) { - cmd.val = val -} - -func (cmd *XPendingCmd) Val() *XPending { - return cmd.val -} - -func (cmd *XPendingCmd) Result() (*XPending, error) { - return cmd.val, cmd.err -} - -func (cmd *XPendingCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 4 { - return nil, fmt.Errorf("got %d, wanted 4", n) - } - - count, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - lower, err := rd.ReadString() - if err != nil && err != Nil { - return nil, err - } - - higher, err := rd.ReadString() - if err != nil && err != Nil { - return nil, err - } - - cmd.val = &XPending{ - Count: count, - Lower: lower, - Higher: higher, - } - _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - for i := int64(0); i < n; i++ { - _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - - consumerName, err := rd.ReadString() - if err != nil { - return nil, err - } - - consumerPending, err := rd.ReadInt() - if err != nil { - return nil, err - } - - if cmd.val.Consumers == nil { - cmd.val.Consumers = make(map[string]int64) - } - cmd.val.Consumers[consumerName] = consumerPending - - return nil, nil - }) - if err != nil { - return nil, err - } - } - return nil, nil - }) - if err != nil && err != Nil { - return nil, err - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XPendingExt struct { - ID string - Consumer string - Idle time.Duration - RetryCount int64 -} - -type XPendingExtCmd struct { - baseCmd - val []XPendingExt -} - -var _ Cmder = (*XPendingExtCmd)(nil) - -func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { - return &XPendingExtCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { - cmd.val = val -} - -func (cmd *XPendingExtCmd) Val() []XPendingExt { - return cmd.val -} - -func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { - return cmd.val, cmd.err -} - -func (cmd *XPendingExtCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]XPendingExt, 0, n) - for i := int64(0); i < n; i++ { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 4 { - return nil, fmt.Errorf("got %d, wanted 4", n) - } - - id, err := rd.ReadString() - if err != nil { - return nil, err - } - - consumer, err := rd.ReadString() - if err != nil && err != Nil { - return nil, err - } - - idle, err := rd.ReadIntReply() - if err != nil && err != Nil { - return nil, err - } - - retryCount, err := rd.ReadIntReply() - if err != nil && err != Nil { - return nil, err - } - - cmd.val = append(cmd.val, XPendingExt{ - ID: id, - Consumer: consumer, - Idle: time.Duration(idle) * time.Millisecond, - RetryCount: retryCount, - }) - return nil, nil - }) - if err != nil { - return nil, err - } - } - return nil, nil - }) - return err -} - 
-//------------------------------------------------------------------------------ - -type XAutoClaimCmd struct { - baseCmd - - start string - val []XMessage -} - -var _ Cmder = (*XAutoClaimCmd)(nil) - -func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { - return &XAutoClaimCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { - cmd.val = val - cmd.start = start -} - -func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { - return cmd.val, cmd.start -} - -func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { - return cmd.val, cmd.start, cmd.err -} - -func (cmd *XAutoClaimCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - var err error - - cmd.start, err = rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val, err = readXMessageSlice(rd) - if err != nil { - return nil, err - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XAutoClaimJustIDCmd struct { - baseCmd - - start string - val []string -} - -var _ Cmder = (*XAutoClaimJustIDCmd)(nil) - -func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { - return &XAutoClaimJustIDCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { - cmd.val = val - cmd.start = start -} - -func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { - return cmd.val, cmd.start -} - -func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { - return cmd.val, cmd.start, cmd.err -} - -func (cmd *XAutoClaimJustIDCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 2 { - return nil, fmt.Errorf("got %d, wanted 2", n) - } - var err error - - cmd.start, err = rd.ReadString() - if err != nil { - return nil, err - } - - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - cmd.val = make([]string, nn) - for i := 0; i < nn; i++ { - cmd.val[i], err = rd.ReadString() - if err != nil { - return nil, err - } - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type XInfoConsumersCmd struct { - baseCmd - val []XInfoConsumer -} - -type XInfoConsumer struct { - Name string - Pending int64 - Idle int64 -} - -var _ Cmder = (*XInfoConsumersCmd)(nil) - -func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { - return &XInfoConsumersCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "consumers", stream, group}, - }, - } -} - -func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { - cmd.val = val -} - -func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { - return cmd.val -} - -func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoConsumersCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { - n, err := 
rd.ReadArrayLen() - if err != nil { - return err - } - - cmd.val = make([]XInfoConsumer, n) - - for i := 0; i < n; i++ { - cmd.val[i], err = readXConsumerInfo(rd) - if err != nil { - return err - } - } - - return nil -} - -func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) { - var consumer XInfoConsumer - - n, err := rd.ReadArrayLen() - if err != nil { - return consumer, err - } - if n != 6 { - return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n) - } - - for i := 0; i < 3; i++ { - key, err := rd.ReadString() - if err != nil { - return consumer, err - } - - val, err := rd.ReadString() - if err != nil { - return consumer, err - } - - switch key { - case "name": - consumer.Name = val - case "pending": - consumer.Pending, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return consumer, err - } - case "idle": - consumer.Idle, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return consumer, err - } - default: - return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) - } - } - - return consumer, nil -} - -//------------------------------------------------------------------------------ - -type XInfoGroupsCmd struct { - baseCmd - val []XInfoGroup -} - -type XInfoGroup struct { - Name string - Consumers int64 - Pending int64 - LastDeliveredID string -} - -var _ Cmder = (*XInfoGroupsCmd)(nil) - -func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { - return &XInfoGroupsCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "groups", stream}, - }, - } -} - -func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { - cmd.val = val -} - -func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { - return cmd.val -} - -func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoGroupsCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadArrayLen() - if err != nil { - return err - } - - cmd.val = make([]XInfoGroup, n) - - for i := 0; i < n; i++ { - cmd.val[i], err = readXGroupInfo(rd) - if err != nil { - return err - } - } - - return nil -} - -func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) { - var group XInfoGroup - - n, err := rd.ReadArrayLen() - if err != nil { - return group, err - } - if n != 8 { - return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n) - } - - for i := 0; i < 4; i++ { - key, err := rd.ReadString() - if err != nil { - return group, err - } - - val, err := rd.ReadString() - if err != nil { - return group, err - } - - switch key { - case "name": - group.Name = val - case "consumers": - group.Consumers, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return group, err - } - case "pending": - group.Pending, err = strconv.ParseInt(val, 0, 64) - if err != nil { - return group, err - } - case "last-delivered-id": - group.LastDeliveredID = val - default: - return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key) - } - } - - return group, nil -} - -//------------------------------------------------------------------------------ - -type XInfoStreamCmd struct { - baseCmd - val *XInfoStream -} - -type XInfoStream struct { - Length int64 - RadixTreeKeys int64 - RadixTreeNodes int64 - Groups int64 - LastGeneratedID string - FirstEntry XMessage - LastEntry XMessage -} - -var _ Cmder = (*XInfoStreamCmd)(nil) - -func NewXInfoStreamCmd(ctx context.Context, stream string) 
*XInfoStreamCmd { - return &XInfoStreamCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: []interface{}{"xinfo", "stream", stream}, - }, - } -} - -func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { - cmd.val = val -} - -func (cmd *XInfoStreamCmd) Val() *XInfoStream { - return cmd.val -} - -func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoStreamCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadReply(xStreamInfoParser) - if err != nil { - return err - } - cmd.val = v.(*XInfoStream) - return nil -} - -func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) { - if n != 14 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ - "wanted 14", n) - } - var info XInfoStream - for i := 0; i < 7; i++ { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - switch key { - case "length": - info.Length, err = rd.ReadIntReply() - case "radix-tree-keys": - info.RadixTreeKeys, err = rd.ReadIntReply() - case "radix-tree-nodes": - info.RadixTreeNodes, err = rd.ReadIntReply() - case "groups": - info.Groups, err = rd.ReadIntReply() - case "last-generated-id": - info.LastGeneratedID, err = rd.ReadString() - case "first-entry": - info.FirstEntry, err = readXMessage(rd) - if err == Nil { - err = nil - } - case "last-entry": - info.LastEntry, err = readXMessage(rd) - if err == Nil { - err = nil - } - default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - if err != nil { - return nil, err - } - } - return &info, nil -} - -//------------------------------------------------------------------------------ - -type XInfoStreamFullCmd struct { - baseCmd - val *XInfoStreamFull -} - -type XInfoStreamFull struct { - Length int64 - RadixTreeKeys int64 - RadixTreeNodes int64 - LastGeneratedID string - Entries []XMessage - Groups []XInfoStreamGroup -} - -type XInfoStreamGroup struct { - Name string - LastDeliveredID string - PelCount int64 - Pending []XInfoStreamGroupPending - Consumers []XInfoStreamConsumer -} - -type XInfoStreamGroupPending struct { - ID string - Consumer string - DeliveryTime time.Time - DeliveryCount int64 -} - -type XInfoStreamConsumer struct { - Name string - SeenTime time.Time - PelCount int64 - Pending []XInfoStreamConsumerPending -} - -type XInfoStreamConsumerPending struct { - ID string - DeliveryTime time.Time - DeliveryCount int64 -} - -var _ Cmder = (*XInfoStreamFullCmd)(nil) - -func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { - return &XInfoStreamFullCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { - cmd.val = val -} - -func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { - return cmd.val -} - -func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { - return cmd.val, cmd.err -} - -func (cmd *XInfoStreamFullCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadArrayLen() - if err != nil { - return err - } - if n != 12 { - return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 12", n) - } - - cmd.val = &XInfoStreamFull{} - - for i := 0; i < 6; i++ { - key, err := rd.ReadString() - if err != nil { - return err - } - - switch key { - case "length": - cmd.val.Length, err = rd.ReadIntReply() - case 
"radix-tree-keys": - cmd.val.RadixTreeKeys, err = rd.ReadIntReply() - case "radix-tree-nodes": - cmd.val.RadixTreeNodes, err = rd.ReadIntReply() - case "last-generated-id": - cmd.val.LastGeneratedID, err = rd.ReadString() - case "entries": - cmd.val.Entries, err = readXMessageSlice(rd) - case "groups": - cmd.val.Groups, err = readStreamGroups(rd) - default: - return fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - if err != nil { - return err - } - } - return nil -} - -func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - groups := make([]XInfoStreamGroup, 0, n) - for i := 0; i < n; i++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 10 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 10", nn) - } - - group := XInfoStreamGroup{} - - for f := 0; f < 5; f++ { - key, err := rd.ReadString() - if err != nil { - return nil, err - } - - switch key { - case "name": - group.Name, err = rd.ReadString() - case "last-delivered-id": - group.LastDeliveredID, err = rd.ReadString() - case "pel-count": - group.PelCount, err = rd.ReadIntReply() - case "pending": - group.Pending, err = readXInfoStreamGroupPending(rd) - case "consumers": - group.Consumers, err = readXInfoStreamConsumers(rd) - default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", key) - } - - if err != nil { - return nil, err - } - } - - groups = append(groups, group) - } - - return groups, nil -} - -func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - pending := make([]XInfoStreamGroupPending, 0, n) - - for i := 0; i < n; i++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 4 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 4", nn) - } - - p := XInfoStreamGroupPending{} - - p.ID, err = rd.ReadString() - if err != nil { - return nil, err - } - - p.Consumer, err = rd.ReadString() - if err != nil { - return nil, err - } - - delivery, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) - - p.DeliveryCount, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - - pending = append(pending, p) - } - - return pending, nil -} - -func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - consumers := make([]XInfoStreamConsumer, 0, n) - - for i := 0; i < n; i++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 8 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ - "wanted 8", nn) - } - - c := XInfoStreamConsumer{} - - for f := 0; f < 4; f++ { - cKey, err := rd.ReadString() - if err != nil { - return nil, err - } - - switch cKey { - case "name": - c.Name, err = rd.ReadString() - case "seen-time": - seen, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond)) - case "pel-count": - c.PelCount, err = rd.ReadIntReply() - case "pending": - pendingNumber, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - - c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) - - for pn := 
0; pn < pendingNumber; pn++ { - nn, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if nn != 3 { - return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ - "wanted 3", nn) - } - - p := XInfoStreamConsumerPending{} - - p.ID, err = rd.ReadString() - if err != nil { - return nil, err - } - - delivery, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) - - p.DeliveryCount, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - - c.Pending = append(c.Pending, p) - } - default: - return nil, fmt.Errorf("redis: unexpected content %s "+ - "in XINFO STREAM reply", cKey) - } - if err != nil { - return nil, err - } - } - consumers = append(consumers, c) - } - - return consumers, nil -} - -//------------------------------------------------------------------------------ - -type ZSliceCmd struct { - baseCmd - - val []Z -} - -var _ Cmder = (*ZSliceCmd)(nil) - -func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { - return &ZSliceCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *ZSliceCmd) SetVal(val []Z) { - cmd.val = val -} - -func (cmd *ZSliceCmd) Val() []Z { - return cmd.val -} - -func (cmd *ZSliceCmd) Result() ([]Z, error) { - return cmd.val, cmd.err -} - -func (cmd *ZSliceCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]Z, n/2) - for i := 0; i < len(cmd.val); i++ { - member, err := rd.ReadString() - if err != nil { - return nil, err - } - - score, err := rd.ReadFloatReply() - if err != nil { - return nil, err - } - - cmd.val[i] = Z{ - Member: member, - Score: score, - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type ZWithKeyCmd struct { - baseCmd - - val *ZWithKey -} - -var _ Cmder = (*ZWithKeyCmd)(nil) - -func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { - return &ZWithKeyCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { - cmd.val = val -} - -func (cmd *ZWithKeyCmd) Val() *ZWithKey { - return cmd.val -} - -func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *ZWithKeyCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - if n != 3 { - return nil, fmt.Errorf("got %d elements, expected 3", n) - } - - cmd.val = &ZWithKey{} - var err error - - cmd.val.Key, err = rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val.Member, err = rd.ReadString() - if err != nil { - return nil, err - } - - cmd.val.Score, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type ScanCmd struct { - baseCmd - - page []string - cursor uint64 - - process cmdable -} - -var _ Cmder = (*ScanCmd)(nil) - -func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { - return &ScanCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - process: process, - } -} - -func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { - 
cmd.page = page - cmd.cursor = cursor -} - -func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { - return cmd.page, cmd.cursor -} - -func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { - return cmd.page, cmd.cursor, cmd.err -} - -func (cmd *ScanCmd) String() string { - return cmdString(cmd, cmd.page) -} - -func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) { - cmd.page, cmd.cursor, err = rd.ReadScanReply() - return err -} - -// Iterator creates a new ScanIterator. -func (cmd *ScanCmd) Iterator() *ScanIterator { - return &ScanIterator{ - cmd: cmd, - } -} - -//------------------------------------------------------------------------------ - -type ClusterNode struct { - ID string - Addr string -} - -type ClusterSlot struct { - Start int - End int - Nodes []ClusterNode -} - -type ClusterSlotsCmd struct { - baseCmd - - val []ClusterSlot -} - -var _ Cmder = (*ClusterSlotsCmd)(nil) - -func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { - return &ClusterSlotsCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { - cmd.val = val -} - -func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { - return cmd.val -} - -func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *ClusterSlotsCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]ClusterSlot, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n < 2 { - err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) - return nil, err - } - - start, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - end, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - nodes := make([]ClusterNode, n-2) - for j := 0; j < len(nodes); j++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n != 2 && n != 3 { - err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) - return nil, err - } - - ip, err := rd.ReadString() - if err != nil { - return nil, err - } - - port, err := rd.ReadString() - if err != nil { - return nil, err - } - - nodes[j].Addr = net.JoinHostPort(ip, port) - - if n == 3 { - id, err := rd.ReadString() - if err != nil { - return nil, err - } - nodes[j].ID = id - } - } - - cmd.val[i] = ClusterSlot{ - Start: int(start), - End: int(end), - Nodes: nodes, - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -// GeoLocation is used with GeoAdd to add geospatial location. -type GeoLocation struct { - Name string - Longitude, Latitude, Dist float64 - GeoHash int64 -} - -// GeoRadiusQuery is used with GeoRadius to query geospatial index. -type GeoRadiusQuery struct { - Radius float64 - // Can be m, km, ft, or mi. Default is km. - Unit string - WithCoord bool - WithDist bool - WithGeoHash bool - Count int - // Can be ASC or DESC. Default is no sort order. 
- Sort string - Store string - StoreDist string -} - -type GeoLocationCmd struct { - baseCmd - - q *GeoRadiusQuery - locations []GeoLocation -} - -var _ Cmder = (*GeoLocationCmd)(nil) - -func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { - return &GeoLocationCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: geoLocationArgs(q, args...), - }, - q: q, - } -} - -func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { - args = append(args, q.Radius) - if q.Unit != "" { - args = append(args, q.Unit) - } else { - args = append(args, "km") - } - if q.WithCoord { - args = append(args, "withcoord") - } - if q.WithDist { - args = append(args, "withdist") - } - if q.WithGeoHash { - args = append(args, "withhash") - } - if q.Count > 0 { - args = append(args, "count", q.Count) - } - if q.Sort != "" { - args = append(args, q.Sort) - } - if q.Store != "" { - args = append(args, "store") - args = append(args, q.Store) - } - if q.StoreDist != "" { - args = append(args, "storedist") - args = append(args, q.StoreDist) - } - return args -} - -func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { - cmd.locations = locations -} - -func (cmd *GeoLocationCmd) Val() []GeoLocation { - return cmd.locations -} - -func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { - return cmd.locations, cmd.err -} - -func (cmd *GeoLocationCmd) String() string { - return cmdString(cmd, cmd.locations) -} - -func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { - v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) - if err != nil { - return err - } - cmd.locations = v.([]GeoLocation) - return nil -} - -func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { - return func(rd *proto.Reader, n int64) (interface{}, error) { - locs := make([]GeoLocation, 0, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(newGeoLocationParser(q)) - if err != nil { - return nil, err - } - switch vv := v.(type) { - case string: - locs = append(locs, GeoLocation{ - Name: vv, - }) - case *GeoLocation: - // TODO: avoid copying - locs = append(locs, *vv) - default: - return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) - } - } - return locs, nil - } -} - -func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { - return func(rd *proto.Reader, n int64) (interface{}, error) { - var loc GeoLocation - var err error - - loc.Name, err = rd.ReadString() - if err != nil { - return nil, err - } - if q.WithDist { - loc.Dist, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - } - if q.WithGeoHash { - loc.GeoHash, err = rd.ReadIntReply() - if err != nil { - return nil, err - } - } - if q.WithCoord { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n != 2 { - return nil, fmt.Errorf("got %d coordinates, expected 2", n) - } - - loc.Longitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - loc.Latitude, err = rd.ReadFloatReply() - if err != nil { - return nil, err - } - } - - return &loc, nil - } -} - -//------------------------------------------------------------------------------ - -// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. -type GeoSearchQuery struct { - Member string - - // Latitude and Longitude when using FromLonLat option. - Longitude float64 - Latitude float64 - - // Distance and unit when using ByRadius option. - // Can use m, km, ft, or mi. Default is km. 
- Radius float64 - RadiusUnit string - - // Height, width and unit when using ByBox option. - // Can be m, km, ft, or mi. Default is km. - BoxWidth float64 - BoxHeight float64 - BoxUnit string - - // Can be ASC or DESC. Default is no sort order. - Sort string - Count int - CountAny bool -} - -type GeoSearchLocationQuery struct { - GeoSearchQuery - - WithCoord bool - WithDist bool - WithHash bool -} - -type GeoSearchStoreQuery struct { - GeoSearchQuery - - // When using the StoreDist option, the command stores the items in a - // sorted set populated with their distance from the center of the circle or box, - // as a floating-point number, in the same unit specified for that shape. - StoreDist bool -} - -func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { - args = geoSearchArgs(&q.GeoSearchQuery, args) - - if q.WithCoord { - args = append(args, "withcoord") - } - if q.WithDist { - args = append(args, "withdist") - } - if q.WithHash { - args = append(args, "withhash") - } - - return args -} - -func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { - if q.Member != "" { - args = append(args, "frommember", q.Member) - } else { - args = append(args, "fromlonlat", q.Longitude, q.Latitude) - } - - if q.Radius > 0 { - if q.RadiusUnit == "" { - q.RadiusUnit = "km" - } - args = append(args, "byradius", q.Radius, q.RadiusUnit) - } else { - if q.BoxUnit == "" { - q.BoxUnit = "km" - } - args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) - } - - if q.Sort != "" { - args = append(args, q.Sort) - } - - if q.Count > 0 { - args = append(args, "count", q.Count) - if q.CountAny { - args = append(args, "any") - } - } - - return args -} - -type GeoSearchLocationCmd struct { - baseCmd - - opt *GeoSearchLocationQuery - val []GeoLocation -} - -var _ Cmder = (*GeoSearchLocationCmd)(nil) - -func NewGeoSearchLocationCmd( - ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, -) *GeoSearchLocationCmd { - return &GeoSearchLocationCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - opt: opt, - } -} - -func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { - cmd.val = val -} - -func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { - return cmd.val -} - -func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { - return cmd.val, cmd.err -} - -func (cmd *GeoSearchLocationCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { - n, err := rd.ReadArrayLen() - if err != nil { - return err - } - - cmd.val = make([]GeoLocation, n) - for i := 0; i < n; i++ { - _, err = rd.ReadArrayLen() - if err != nil { - return err - } - - var loc GeoLocation - - loc.Name, err = rd.ReadString() - if err != nil { - return err - } - if cmd.opt.WithDist { - loc.Dist, err = rd.ReadFloatReply() - if err != nil { - return err - } - } - if cmd.opt.WithHash { - loc.GeoHash, err = rd.ReadIntReply() - if err != nil { - return err - } - } - if cmd.opt.WithCoord { - nn, err := rd.ReadArrayLen() - if err != nil { - return err - } - if nn != 2 { - return fmt.Errorf("got %d coordinates, expected 2", nn) - } - - loc.Longitude, err = rd.ReadFloatReply() - if err != nil { - return err - } - loc.Latitude, err = rd.ReadFloatReply() - if err != nil { - return err - } - } - - cmd.val[i] = loc - } - - return nil -} - -//------------------------------------------------------------------------------ - -type GeoPos struct { - Longitude, Latitude float64 -} - -type GeoPosCmd struct { - 
baseCmd - - val []*GeoPos -} - -var _ Cmder = (*GeoPosCmd)(nil) - -func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { - return &GeoPosCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *GeoPosCmd) SetVal(val []*GeoPos) { - cmd.val = val -} - -func (cmd *GeoPosCmd) Val() []*GeoPos { - return cmd.val -} - -func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *GeoPosCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]*GeoPos, n) - for i := 0; i < len(cmd.val); i++ { - i := i - _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - longitude, err := rd.ReadFloatReply() - if err != nil { - return nil, err - } - - latitude, err := rd.ReadFloatReply() - if err != nil { - return nil, err - } - - cmd.val[i] = &GeoPos{ - Longitude: longitude, - Latitude: latitude, - } - return nil, nil - }) - if err != nil { - if err == Nil { - cmd.val[i] = nil - continue - } - return nil, err - } - } - return nil, nil - }) - return err -} - -//------------------------------------------------------------------------------ - -type CommandInfo struct { - Name string - Arity int8 - Flags []string - ACLFlags []string - FirstKeyPos int8 - LastKeyPos int8 - StepCount int8 - ReadOnly bool -} - -type CommandsInfoCmd struct { - baseCmd - - val map[string]*CommandInfo -} - -var _ Cmder = (*CommandsInfoCmd)(nil) - -func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { - return &CommandsInfoCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { - cmd.val = val -} - -func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { - return cmd.val -} - -func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *CommandsInfoCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make(map[string]*CommandInfo, n) - for i := int64(0); i < n; i++ { - v, err := rd.ReadReply(commandInfoParser) - if err != nil { - return nil, err - } - vv := v.(*CommandInfo) - cmd.val[vv.Name] = vv - } - return nil, nil - }) - return err -} - -func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { - const numArgRedis5 = 6 - const numArgRedis6 = 7 - - switch n { - case numArgRedis5, numArgRedis6: - // continue - default: - return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n) - } - - var cmd CommandInfo - var err error - - cmd.Name, err = rd.ReadString() - if err != nil { - return nil, err - } - - arity, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.Arity = int8(arity) - - _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.Flags = make([]string, n) - for i := 0; i < len(cmd.Flags); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.Flags[i] = "" - case err != nil: - return nil, err - default: - cmd.Flags[i] = s - } - } - return nil, nil - }) - if err != nil { - return nil, err - } - - firstKeyPos, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.FirstKeyPos = int8(firstKeyPos) - - lastKeyPos, err := 
rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.LastKeyPos = int8(lastKeyPos) - - stepCount, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - cmd.StepCount = int8(stepCount) - - for _, flag := range cmd.Flags { - if flag == "readonly" { - cmd.ReadOnly = true - break - } - } - - if n == numArgRedis5 { - return &cmd, nil - } - - _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.ACLFlags = make([]string, n) - for i := 0; i < len(cmd.ACLFlags); i++ { - switch s, err := rd.ReadString(); { - case err == Nil: - cmd.ACLFlags[i] = "" - case err != nil: - return nil, err - default: - cmd.ACLFlags[i] = s - } - } - return nil, nil - }) - if err != nil { - return nil, err - } - - return &cmd, nil -} - -//------------------------------------------------------------------------------ - -type cmdsInfoCache struct { - fn func(ctx context.Context) (map[string]*CommandInfo, error) - - once internal.Once - cmds map[string]*CommandInfo -} - -func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { - return &cmdsInfoCache{ - fn: fn, - } -} - -func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { - err := c.once.Do(func() error { - cmds, err := c.fn(ctx) - if err != nil { - return err - } - - // Extensions have cmd names in upper case. Convert them to lower case. - for k, v := range cmds { - lower := internal.ToLower(k) - if lower != k { - cmds[lower] = v - } - } - - c.cmds = cmds - return nil - }) - return c.cmds, err -} - -//------------------------------------------------------------------------------ - -type SlowLog struct { - ID int64 - Time time.Time - Duration time.Duration - Args []string - // These are also optional fields emitted only by Redis 4.0 or greater: - // https://redis.io/commands/slowlog#output-format - ClientAddr string - ClientName string -} - -type SlowLogCmd struct { - baseCmd - - val []SlowLog -} - -var _ Cmder = (*SlowLogCmd)(nil) - -func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { - return &SlowLogCmd{ - baseCmd: baseCmd{ - ctx: ctx, - args: args, - }, - } -} - -func (cmd *SlowLogCmd) SetVal(val []SlowLog) { - cmd.val = val -} - -func (cmd *SlowLogCmd) Val() []SlowLog { - return cmd.val -} - -func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { - return cmd.Val(), cmd.Err() -} - -func (cmd *SlowLogCmd) String() string { - return cmdString(cmd, cmd.val) -} - -func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { - _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { - cmd.val = make([]SlowLog, n) - for i := 0; i < len(cmd.val); i++ { - n, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if n < 4 { - err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n) - return nil, err - } - - id, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - - createdAt, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - createdAtTime := time.Unix(createdAt, 0) - - costs, err := rd.ReadIntReply() - if err != nil { - return nil, err - } - costsDuration := time.Duration(costs) * time.Microsecond - - cmdLen, err := rd.ReadArrayLen() - if err != nil { - return nil, err - } - if cmdLen < 1 { - err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) - return nil, err - } - - cmdString := make([]string, cmdLen) - for i := 0; i < cmdLen; i++ { - cmdString[i], err = rd.ReadString() - if err != nil { 
- return nil, err - } - } - - var address, name string - for i := 4; i < n; i++ { - str, err := rd.ReadString() - if err != nil { - return nil, err - } - if i == 4 { - address = str - } else if i == 5 { - name = str - } - } - - cmd.val[i] = SlowLog{ - ID: id, - Time: createdAtTime, - Duration: costsDuration, - Args: cmdString, - ClientAddr: address, - ClientName: name, - } - } - return nil, nil - }) - return err -} diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go deleted file mode 100644 index 0e6ca779b1..0000000000 --- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go +++ /dev/null @@ -1,332 +0,0 @@ -package proto - -import ( - "bufio" - "fmt" - "io" - - "github.com/go-redis/redis/v8/internal/util" -) - -// redis resp protocol data type. -const ( - ErrorReply = '-' - StatusReply = '+' - IntReply = ':' - StringReply = '$' - ArrayReply = '*' -) - -//------------------------------------------------------------------------------ - -const Nil = RedisError("redis: nil") // nolint:errname - -type RedisError string - -func (e RedisError) Error() string { return string(e) } - -func (RedisError) RedisError() {} - -//------------------------------------------------------------------------------ - -type MultiBulkParse func(*Reader, int64) (interface{}, error) - -type Reader struct { - rd *bufio.Reader - _buf []byte -} - -func NewReader(rd io.Reader) *Reader { - return &Reader{ - rd: bufio.NewReader(rd), - _buf: make([]byte, 64), - } -} - -func (r *Reader) Buffered() int { - return r.rd.Buffered() -} - -func (r *Reader) Peek(n int) ([]byte, error) { - return r.rd.Peek(n) -} - -func (r *Reader) Reset(rd io.Reader) { - r.rd.Reset(rd) -} - -func (r *Reader) ReadLine() ([]byte, error) { - line, err := r.readLine() - if err != nil { - return nil, err - } - if isNilReply(line) { - return nil, Nil - } - return line, nil -} - -// readLine that returns an error if: -// - there is a pending read error; -// - or line does not end with \r\n. -func (r *Reader) readLine() ([]byte, error) { - b, err := r.rd.ReadSlice('\n') - if err != nil { - if err != bufio.ErrBufferFull { - return nil, err - } - - full := make([]byte, len(b)) - copy(full, b) - - b, err = r.rd.ReadBytes('\n') - if err != nil { - return nil, err - } - - full = append(full, b...) 
//nolint:makezero - b = full - } - if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' { - return nil, fmt.Errorf("redis: invalid reply: %q", b) - } - return b[:len(b)-2], nil -} - -func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { - line, err := r.ReadLine() - if err != nil { - return nil, err - } - - switch line[0] { - case ErrorReply: - return nil, ParseErrorReply(line) - case StatusReply: - return string(line[1:]), nil - case IntReply: - return util.ParseInt(line[1:], 10, 64) - case StringReply: - return r.readStringReply(line) - case ArrayReply: - n, err := parseArrayLen(line) - if err != nil { - return nil, err - } - if m == nil { - err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line) - return nil, err - } - return m(r, n) - } - return nil, fmt.Errorf("redis: can't parse %.100q", line) -} - -func (r *Reader) ReadIntReply() (int64, error) { - line, err := r.ReadLine() - if err != nil { - return 0, err - } - switch line[0] { - case ErrorReply: - return 0, ParseErrorReply(line) - case IntReply: - return util.ParseInt(line[1:], 10, 64) - default: - return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line) - } -} - -func (r *Reader) ReadString() (string, error) { - line, err := r.ReadLine() - if err != nil { - return "", err - } - switch line[0] { - case ErrorReply: - return "", ParseErrorReply(line) - case StringReply: - return r.readStringReply(line) - case StatusReply: - return string(line[1:]), nil - case IntReply: - return string(line[1:]), nil - default: - return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line) - } -} - -func (r *Reader) readStringReply(line []byte) (string, error) { - if isNilReply(line) { - return "", Nil - } - - replyLen, err := util.Atoi(line[1:]) - if err != nil { - return "", err - } - - b := make([]byte, replyLen+2) - _, err = io.ReadFull(r.rd, b) - if err != nil { - return "", err - } - - return util.BytesToString(b[:replyLen]), nil -} - -func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { - line, err := r.ReadLine() - if err != nil { - return nil, err - } - switch line[0] { - case ErrorReply: - return nil, ParseErrorReply(line) - case ArrayReply: - n, err := parseArrayLen(line) - if err != nil { - return nil, err - } - return m(r, n) - default: - return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line) - } -} - -func (r *Reader) ReadArrayLen() (int, error) { - line, err := r.ReadLine() - if err != nil { - return 0, err - } - switch line[0] { - case ErrorReply: - return 0, ParseErrorReply(line) - case ArrayReply: - n, err := parseArrayLen(line) - if err != nil { - return 0, err - } - return int(n), nil - default: - return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line) - } -} - -func (r *Reader) ReadScanReply() ([]string, uint64, error) { - n, err := r.ReadArrayLen() - if err != nil { - return nil, 0, err - } - if n != 2 { - return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) - } - - cursor, err := r.ReadUint() - if err != nil { - return nil, 0, err - } - - n, err = r.ReadArrayLen() - if err != nil { - return nil, 0, err - } - - keys := make([]string, n) - - for i := 0; i < n; i++ { - key, err := r.ReadString() - if err != nil { - return nil, 0, err - } - keys[i] = key - } - - return keys, cursor, err -} - -func (r *Reader) ReadInt() (int64, error) { - b, err := r.readTmpBytesReply() - if err != nil { - return 0, err - } - return util.ParseInt(b, 10, 64) -} - -func (r *Reader) ReadUint() (uint64, error) 
{ - b, err := r.readTmpBytesReply() - if err != nil { - return 0, err - } - return util.ParseUint(b, 10, 64) -} - -func (r *Reader) ReadFloatReply() (float64, error) { - b, err := r.readTmpBytesReply() - if err != nil { - return 0, err - } - return util.ParseFloat(b, 64) -} - -func (r *Reader) readTmpBytesReply() ([]byte, error) { - line, err := r.ReadLine() - if err != nil { - return nil, err - } - switch line[0] { - case ErrorReply: - return nil, ParseErrorReply(line) - case StringReply: - return r._readTmpBytesReply(line) - case StatusReply: - return line[1:], nil - default: - return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line) - } -} - -func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) { - if isNilReply(line) { - return nil, Nil - } - - replyLen, err := util.Atoi(line[1:]) - if err != nil { - return nil, err - } - - buf := r.buf(replyLen + 2) - _, err = io.ReadFull(r.rd, buf) - if err != nil { - return nil, err - } - - return buf[:replyLen], nil -} - -func (r *Reader) buf(n int) []byte { - if n <= cap(r._buf) { - return r._buf[:n] - } - d := n - cap(r._buf) - r._buf = append(r._buf, make([]byte, d)...) - return r._buf -} - -func isNilReply(b []byte) bool { - return len(b) == 3 && - (b[0] == StringReply || b[0] == ArrayReply) && - b[1] == '-' && b[2] == '1' -} - -func ParseErrorReply(line []byte) error { - return RedisError(string(line[1:])) -} - -func parseArrayLen(line []byte) (int64, error) { - if isNilReply(line) { - return 0, Nil - } - return util.ParseInt(line[1:], 10, 64) -} diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go deleted file mode 100644 index fd2f434094..0000000000 --- a/vendor/github.com/go-redis/redis/v8/internal/safe.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build appengine -// +build appengine - -package internal - -func String(b []byte) string { - return string(b) -} - -func Bytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go deleted file mode 100644 index 9f2e418f79..0000000000 --- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !appengine -// +build !appengine - -package internal - -import "unsafe" - -// String converts byte slice to string. -func String(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -// Bytes converts string to byte slice. -func Bytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) -} diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go deleted file mode 100644 index bcf8a2a94b..0000000000 --- a/vendor/github.com/go-redis/redis/v8/redis.go +++ /dev/null @@ -1,773 +0,0 @@ -package redis - -import ( - "context" - "errors" - "fmt" - "sync/atomic" - "time" - - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/pool" - "github.com/go-redis/redis/v8/internal/proto" -) - -// Nil reply returned by Redis when key does not exist. 
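The reader being removed above reports missing keys through the proto.Nil sentinel, which the deleted redis.go below re-exports as redis.Nil. A minimal sketch, assuming a locally reachable Redis instance and key names that are purely illustrative (none of this is part of the patch), of how callers of the go-redis v8 API distinguish an absent key from a real failure:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	ctx := context.Background()
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local instance

    	val, err := rdb.Get(ctx, "missing-key").Result()
    	switch {
    	case err == redis.Nil:
    		fmt.Println("key does not exist") // nil reply: absent key, not an error
    	case err != nil:
    		log.Fatal(err) // connection or protocol failure
    	default:
    		fmt.Println("value:", val)
    	}
    }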
-const Nil = proto.Nil - -func SetLogger(logger internal.Logging) { - internal.Logger = logger -} - -//------------------------------------------------------------------------------ - -type Hook interface { - BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) - AfterProcess(ctx context.Context, cmd Cmder) error - - BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) - AfterProcessPipeline(ctx context.Context, cmds []Cmder) error -} - -type hooks struct { - hooks []Hook -} - -func (hs *hooks) lock() { - hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)] -} - -func (hs hooks) clone() hooks { - clone := hs - clone.lock() - return clone -} - -func (hs *hooks) AddHook(hook Hook) { - hs.hooks = append(hs.hooks, hook) -} - -func (hs hooks) process( - ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error, -) error { - if len(hs.hooks) == 0 { - err := fn(ctx, cmd) - cmd.SetErr(err) - return err - } - - var hookIndex int - var retErr error - - for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ { - ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd) - if retErr != nil { - cmd.SetErr(retErr) - } - } - - if retErr == nil { - retErr = fn(ctx, cmd) - cmd.SetErr(retErr) - } - - for hookIndex--; hookIndex >= 0; hookIndex-- { - if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil { - retErr = err - cmd.SetErr(retErr) - } - } - - return retErr -} - -func (hs hooks) processPipeline( - ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error, -) error { - if len(hs.hooks) == 0 { - err := fn(ctx, cmds) - return err - } - - var hookIndex int - var retErr error - - for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ { - ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds) - if retErr != nil { - setCmdsErr(cmds, retErr) - } - } - - if retErr == nil { - retErr = fn(ctx, cmds) - } - - for hookIndex--; hookIndex >= 0; hookIndex-- { - if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil { - retErr = err - setCmdsErr(cmds, retErr) - } - } - - return retErr -} - -func (hs hooks) processTxPipeline( - ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error, -) error { - cmds = wrapMultiExec(ctx, cmds) - return hs.processPipeline(ctx, cmds, fn) -} - -//------------------------------------------------------------------------------ - -type baseClient struct { - opt *Options - connPool pool.Pooler - - onClose func() error // hook called when client is closed -} - -func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient { - return &baseClient{ - opt: opt, - connPool: connPool, - } -} - -func (c *baseClient) clone() *baseClient { - clone := *c - return &clone -} - -func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { - opt := c.opt.clone() - opt.ReadTimeout = timeout - opt.WriteTimeout = timeout - - clone := c.clone() - clone.opt = opt - - return clone -} - -func (c *baseClient) String() string { - return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) -} - -func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { - cn, err := c.connPool.NewConn(ctx) - if err != nil { - return nil, err - } - - err = c.initConn(ctx, cn) - if err != nil { - _ = c.connPool.CloseConn(cn) - return nil, err - } - - return cn, nil -} - -func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { - if c.opt.Limiter != nil { - err := c.opt.Limiter.Allow() - if err != nil { - return nil, err - } - } - - cn, err := 
c._getConn(ctx) - if err != nil { - if c.opt.Limiter != nil { - c.opt.Limiter.ReportResult(err) - } - return nil, err - } - - return cn, nil -} - -func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { - cn, err := c.connPool.Get(ctx) - if err != nil { - return nil, err - } - - if cn.Inited { - return cn, nil - } - - if err := c.initConn(ctx, cn); err != nil { - c.connPool.Remove(ctx, cn, err) - if err := errors.Unwrap(err); err != nil { - return nil, err - } - return nil, err - } - - return cn, nil -} - -func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { - if cn.Inited { - return nil - } - cn.Inited = true - - if c.opt.Password == "" && - c.opt.DB == 0 && - !c.opt.readOnly && - c.opt.OnConnect == nil { - return nil - } - - connPool := pool.NewSingleConnPool(c.connPool, cn) - conn := newConn(ctx, c.opt, connPool) - - _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error { - if c.opt.Password != "" { - if c.opt.Username != "" { - pipe.AuthACL(ctx, c.opt.Username, c.opt.Password) - } else { - pipe.Auth(ctx, c.opt.Password) - } - } - - if c.opt.DB > 0 { - pipe.Select(ctx, c.opt.DB) - } - - if c.opt.readOnly { - pipe.ReadOnly(ctx) - } - - return nil - }) - if err != nil { - return err - } - - if c.opt.OnConnect != nil { - return c.opt.OnConnect(ctx, conn) - } - return nil -} - -func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) { - if c.opt.Limiter != nil { - c.opt.Limiter.ReportResult(err) - } - - if isBadConn(err, false, c.opt.Addr) { - c.connPool.Remove(ctx, cn, err) - } else { - c.connPool.Put(ctx, cn) - } -} - -func (c *baseClient) withConn( - ctx context.Context, fn func(context.Context, *pool.Conn) error, -) error { - cn, err := c.getConn(ctx) - if err != nil { - return err - } - - defer func() { - c.releaseConn(ctx, cn, err) - }() - - done := ctx.Done() //nolint:ifshort - - if done == nil { - err = fn(ctx, cn) - return err - } - - errc := make(chan error, 1) - go func() { errc <- fn(ctx, cn) }() - - select { - case <-done: - _ = cn.Close() - // Wait for the goroutine to finish and send something. 
- <-errc - - err = ctx.Err() - return err - case err = <-errc: - return err - } -} - -func (c *baseClient) process(ctx context.Context, cmd Cmder) error { - var lastErr error - for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { - attempt := attempt - - retry, err := c._process(ctx, cmd, attempt) - if err == nil || !retry { - return err - } - - lastErr = err - } - return lastErr -} - -func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return false, err - } - } - - retryTimeout := uint32(1) - err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmd(wr, cmd) - }) - if err != nil { - return err - } - - err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply) - if err != nil { - if cmd.readTimeout() == nil { - atomic.StoreUint32(&retryTimeout, 1) - } - return err - } - - return nil - }) - if err == nil { - return false, nil - } - - retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1) - return retry, err -} - -func (c *baseClient) retryBackoff(attempt int) time.Duration { - return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) -} - -func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { - if timeout := cmd.readTimeout(); timeout != nil { - t := *timeout - if t == 0 { - return 0 - } - return t + 10*time.Second - } - return c.opt.ReadTimeout -} - -// Close closes the client, releasing any open resources. -// -// It is rare to Close a Client, as the Client is meant to be -// long-lived and shared between many goroutines. -func (c *baseClient) Close() error { - var firstErr error - if c.onClose != nil { - if err := c.onClose(); err != nil { - firstErr = err - } - } - if err := c.connPool.Close(); err != nil && firstErr == nil { - firstErr = err - } - return firstErr -} - -func (c *baseClient) getAddr() string { - return c.opt.Addr -} - -func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds) -} - -func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds) -} - -type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error) - -func (c *baseClient) generalProcessPipeline( - ctx context.Context, cmds []Cmder, p pipelineProcessor, -) error { - err := c._generalProcessPipeline(ctx, cmds, p) - if err != nil { - setCmdsErr(cmds, err) - return err - } - return cmdsFirstErr(cmds) -} - -func (c *baseClient) _generalProcessPipeline( - ctx context.Context, cmds []Cmder, p pipelineProcessor, -) error { - var lastErr error - for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { - return err - } - } - - var canRetry bool - lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - var err error - canRetry, err = p(ctx, cn, cmds) - return err - }) - if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) { - return lastErr - } - } - return lastErr -} - -func (c *baseClient) pipelineProcessCmds( - ctx context.Context, cn *pool.Conn, cmds []Cmder, -) (bool, error) { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return 
true, err - } - - err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - return pipelineReadCmds(rd, cmds) - }) - return true, err -} - -func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { - for _, cmd := range cmds { - err := cmd.readReply(rd) - cmd.SetErr(err) - if err != nil && !isRedisError(err) { - return err - } - } - return nil -} - -func (c *baseClient) txPipelineProcessCmds( - ctx context.Context, cn *pool.Conn, cmds []Cmder, -) (bool, error) { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return true, err - } - - err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - statusCmd := cmds[0].(*StatusCmd) - // Trim multi and exec. - cmds = cmds[1 : len(cmds)-1] - - err := txPipelineReadQueued(rd, statusCmd, cmds) - if err != nil { - return err - } - - return pipelineReadCmds(rd, cmds) - }) - return false, err -} - -func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder { - if len(cmds) == 0 { - panic("not reached") - } - cmdCopy := make([]Cmder, len(cmds)+2) - cmdCopy[0] = NewStatusCmd(ctx, "multi") - copy(cmdCopy[1:], cmds) - cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec") - return cmdCopy -} - -func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { - // Parse queued replies. - if err := statusCmd.readReply(rd); err != nil { - return err - } - - for range cmds { - if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) { - return err - } - } - - // Parse number of replies. - line, err := rd.ReadLine() - if err != nil { - if err == Nil { - err = TxFailedErr - } - return err - } - - switch line[0] { - case proto.ErrorReply: - return proto.ParseErrorReply(line) - case proto.ArrayReply: - // ok - default: - err := fmt.Errorf("redis: expected '*', but got line %q", line) - return err - } - - return nil -} - -//------------------------------------------------------------------------------ - -// Client is a Redis client representing a pool of zero or more -// underlying connections. It's safe for concurrent use by multiple -// goroutines. -type Client struct { - *baseClient - cmdable - hooks - ctx context.Context -} - -// NewClient returns a client to the Redis Server specified by Options. -func NewClient(opt *Options) *Client { - opt.init() - - c := Client{ - baseClient: newBaseClient(opt, newConnPool(opt)), - ctx: context.Background(), - } - c.cmdable = c.Process - - return &c -} - -func (c *Client) clone() *Client { - clone := *c - clone.cmdable = clone.Process - clone.hooks.lock() - return &clone -} - -func (c *Client) WithTimeout(timeout time.Duration) *Client { - clone := c.clone() - clone.baseClient = c.baseClient.withTimeout(timeout) - return clone -} - -func (c *Client) Context() context.Context { - return c.ctx -} - -func (c *Client) WithContext(ctx context.Context) *Client { - if ctx == nil { - panic("nil context") - } - clone := c.clone() - clone.ctx = ctx - return clone -} - -func (c *Client) Conn(ctx context.Context) *Conn { - return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool)) -} - -// Do creates a Cmd from the args and processes the cmd. -func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd { - cmd := NewCmd(ctx, args...) 
- _ = c.Process(ctx, cmd) - return cmd -} - -func (c *Client) Process(ctx context.Context, cmd Cmder) error { - return c.hooks.process(ctx, cmd, c.baseClient.process) -} - -func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) -} - -func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) -} - -// Options returns read-only Options that were used to create the client. -func (c *Client) Options() *Options { - return c.opt -} - -type PoolStats pool.Stats - -// PoolStats returns connection pool stats. -func (c *Client) PoolStats() *PoolStats { - stats := c.connPool.Stats() - return (*PoolStats)(stats) -} - -func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(ctx, fn) -} - -func (c *Client) Pipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processPipeline, - } - pipe.init() - return &pipe -} - -func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(ctx, fn) -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. -func (c *Client) TxPipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processTxPipeline, - } - pipe.init() - return &pipe -} - -func (c *Client) pubSub() *PubSub { - pubsub := &PubSub{ - opt: c.opt, - - newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { - return c.newConn(ctx) - }, - closeConn: c.connPool.CloseConn, - } - pubsub.init() - return pubsub -} - -// Subscribe subscribes the client to the specified channels. -// Channels can be omitted to create empty subscription. -// Note that this method does not wait on a response from Redis, so the -// subscription may not be active immediately. To force the connection to wait, -// you may call the Receive() method on the returned *PubSub like so: -// -// sub := client.Subscribe(queryResp) -// iface, err := sub.Receive() -// if err != nil { -// // handle error -// } -// -// // Should be *Subscription, but others are possible if other actions have been -// // taken on sub since it was created. -// switch iface.(type) { -// case *Subscription: -// // subscribe succeeded -// case *Message: -// // received first message -// case *Pong: -// // pong received -// default: -// // handle error -// } -// -// ch := sub.Channel() -func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.Subscribe(ctx, channels...) - } - return pubsub -} - -// PSubscribe subscribes the client to the given patterns. -// Patterns can be omitted to create empty subscription. -func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub { - pubsub := c.pubSub() - if len(channels) > 0 { - _ = pubsub.PSubscribe(ctx, channels...) - } - return pubsub -} - -//------------------------------------------------------------------------------ - -type conn struct { - baseClient - cmdable - statefulCmdable - hooks // TODO: inherit hooks -} - -// Conn represents a single Redis connection rather than a pool of connections. -// Prefer running commands from Client unless there is a specific need -// for a continuous single Redis connection. 
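TxPipeline above queues commands on the client and, as wrapMultiExec and txPipelineReadQueued show, wraps them in MULTI/EXEC when they are sent. A minimal sketch, assuming an already constructed *redis.Client and an illustrative key name that this patch does not define:

    // Imports assumed: context, time, and github.com/go-redis/redis/v8.
    func incrWithTTL(ctx context.Context, rdb *redis.Client) (int64, error) {
    	cmds, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
    		pipe.Incr(ctx, "pipeline_counter")
    		pipe.Expire(ctx, "pipeline_counter", time.Hour)
    		return nil // both commands go out inside one MULTI/EXEC block
    	})
    	if err != nil {
    		return 0, err
    	}
    	// TxPipelined returns the queued Cmders; the first is the INCR result.
    	return cmds[0].(*redis.IntCmd).Val(), nil
    }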
-type Conn struct { - *conn - ctx context.Context -} - -func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn { - c := Conn{ - conn: &conn{ - baseClient: baseClient{ - opt: opt, - connPool: connPool, - }, - }, - ctx: ctx, - } - c.cmdable = c.Process - c.statefulCmdable = c.Process - return &c -} - -func (c *Conn) Process(ctx context.Context, cmd Cmder) error { - return c.hooks.process(ctx, cmd, c.baseClient.process) -} - -func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) -} - -func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) -} - -func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.Pipeline().Pipelined(ctx, fn) -} - -func (c *Conn) Pipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processPipeline, - } - pipe.init() - return &pipe -} - -func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { - return c.TxPipeline().Pipelined(ctx, fn) -} - -// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. -func (c *Conn) TxPipeline() Pipeliner { - pipe := Pipeline{ - ctx: c.ctx, - exec: c.processTxPipeline, - } - pipe.init() - return &pipe -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index 6c16c255ff..c6f66f1039 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -56,6 +56,7 @@ type Unmarshaler struct { // implement JSONPBMarshaler so that the custom format can be produced. // // The JSON unmarshaling must follow the JSON to proto specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go index 685c80a62b..e9438a93f3 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -55,6 +55,7 @@ type Marshaler struct { // implement JSONPBUnmarshaler so that the custom format can be parsed. // // The JSON marshaling must follow the proto to JSON specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 63dc057851..a5a138613a 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -12,6 +12,31 @@ import ( // Symbols defined in public import of google/protobuf/descriptor.proto. 
+type Edition = descriptorpb.Edition + +const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN +const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2 +const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3 +const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023 +const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024 +const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY +const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY +const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY +const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY +const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY +const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX + +var Edition_name = descriptorpb.Edition_name +var Edition_value = descriptorpb.Edition_value + +type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState + +const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION +const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED + +var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name +var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value + type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE @@ -39,8 +64,8 @@ var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_val type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL -const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value @@ -72,6 +97,31 @@ const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value +type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention + +const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN +const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME +const FieldOptions_RETENTION_SOURCE = descriptorpb.FieldOptions_RETENTION_SOURCE + +var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name +var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value + +type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType + +const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN +const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE +const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE +const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE +const FieldOptions_TARGET_TYPE_FIELD = 
descriptorpb.FieldOptions_TARGET_TYPE_FIELD +const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF +const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM +const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY +const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE +const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD + +var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name +var FieldOptions_OptionTargetType_value = descriptorpb.FieldOptions_OptionTargetType_value + type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN @@ -81,10 +131,77 @@ const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value +type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence + +const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN +const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT +const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT +const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED + +var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name +var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value + +type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType + +const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN +const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN +const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED + +var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name +var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value + +type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding + +const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED +const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED + +var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name +var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value + +type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation + +const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN +const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY +const FeatureSet_NONE = descriptorpb.FeatureSet_NONE + +var FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name +var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value + +type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding + +const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN +const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED +const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED + +var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name +var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value + +type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat 
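The additions in this hunk are pure aliases: the legacy github.com/golang/protobuf/protoc-gen-go/descriptor package forwards the newer Editions and FeatureSet symbols to google.golang.org/protobuf/types/descriptorpb. A small sketch, assuming compatible versions of both modules (not pinned by this excerpt), of why aliasing matters: values move between the two packages without any conversion.

    package main

    import (
    	"fmt"

    	legacy "github.com/golang/protobuf/protoc-gen-go/descriptor"
    	"google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
    	// legacy.Edition is a type alias, so a descriptorpb value assigns directly.
    	var e legacy.Edition = descriptorpb.Edition_EDITION_PROTO3
    	fmt.Println(legacy.Edition_name[int32(e)]) // "EDITION_PROTO3"
    }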
+ +const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN +const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW +const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT + +var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name +var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value + +type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic + +const GeneratedCodeInfo_Annotation_NONE = descriptorpb.GeneratedCodeInfo_Annotation_NONE +const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET +const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS + +var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name +var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value + type FileDescriptorSet = descriptorpb.FileDescriptorSet type FileDescriptorProto = descriptorpb.FileDescriptorProto type DescriptorProto = descriptorpb.DescriptorProto type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions + +const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification + type FieldDescriptorProto = descriptorpb.FieldDescriptorProto type OneofDescriptorProto = descriptorpb.OneofDescriptorProto type EnumDescriptorProto = descriptorpb.EnumDescriptorProto @@ -103,7 +220,6 @@ const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_Optimiz const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices -const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas @@ -118,8 +234,10 @@ type FieldOptions = descriptorpb.FieldOptions const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak +const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact type OneofOptions = descriptorpb.OneofOptions type EnumOptions = descriptorpb.EnumOptions @@ -129,6 +247,7 @@ const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecat type EnumValueOptions = descriptorpb.EnumValueOptions const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated +const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact type ServiceOptions = descriptorpb.ServiceOptions @@ -140,12 +259,17 @@ const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Depr const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel type UninterpretedOption = descriptorpb.UninterpretedOption +type 
FeatureSet = descriptorpb.FeatureSet +type FeatureSetDefaults = descriptorpb.FeatureSetDefaults type SourceCodeInfo = descriptorpb.SourceCodeInfo type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f57365..fdff3fdb4c 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d973..0000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md index 6062a4dacd..eab5dbf7ba 100644 --- a/vendor/github.com/google/btree/README.md +++ b/vendor/github.com/google/btree/README.md @@ -1,7 +1,5 @@ # BTree implementation for Go -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - This package provides an in-memory B-Tree implementation for Go, useful as an ordered, mutable data structure. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index b83acdbc6d..969b910d70 100644 --- a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.18 +// +build !go1.18 + // Package btree implements in-memory B-Trees of arbitrary degree. // // btree implements an in-memory B-Tree for use as an ordered data structure. diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go new file mode 100644 index 0000000000..e44a0f4880 --- /dev/null +++ b/vendor/github.com/google/btree/btree_generic.go @@ -0,0 +1,1083 @@ +// Copyright 2014-2022 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific +// instantiation of that generic for the Item interface, with a backwards- +// compatible API. Before go1.18, generics are not supported, +// and BTree is just an implementation based around the Item interface. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +// +// There are two implementations; those suffixed with 'G' are generics, usable +// for any type, and require a passed-in "less" function to define their ordering. +// Those without this prefix are specific to the 'Item' interface, and use +// its 'Less' function for ordering. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +// FreeListG represents a free list of btree nodes. 
By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList, in particular when they're created with Clone. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeListG[T any] struct { + mu sync.Mutex + freelist []*node[T] +} + +// NewFreeListG creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeListG[T any](size int) *FreeListG[T] { + return &FreeListG[T]{freelist: make([]*node[T], 0, size)} +} + +func (f *FreeListG[T]) newNode() (n *node[T]) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node[T]) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIteratorG[T any] func(item T) bool + +// Ordered represents the set of types for which the '<' operator work. +type Ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string +} + +// Less[T] returns a default LessFunc that uses the '<' operator for types that support it. +func Less[T Ordered]() LessFunc[T] { + return func(a, b T) bool { return a < b } +} + +// NewOrderedG creates a new B-Tree for ordered types. +func NewOrderedG[T Ordered](degree int) *BTreeG[T] { + return NewG[T](degree, Less[T]()) +} + +// NewG creates a new B-Tree with the given degree. +// +// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The passed-in LessFunc determines how objects of type T are ordered. +func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] { + return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize)) +} + +// NewWithFreeListG creates a new B-Tree that uses the given node free list. +func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] { + if degree <= 1 { + panic("bad degree") + } + return &BTreeG[T]{ + degree: degree, + cow: ©OnWriteContext[T]{freelist: f, less: less}, + } +} + +// items stores items in a node. +type items[T any] []T + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items[T]) insertAt(index int, item T) { + var zero T + *s = append(*s, zero) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items[T]) removeAt(index int) T { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + var zero T + (*s)[len(*s)-1] = zero + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items[T]) pop() (out T) { + index := len(*s) - 1 + out = (*s)[index] + var zero T + (*s)[index] = zero + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. 
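Since btree_generic.go introduces the generics-based constructors shown above (NewOrderedG, NewG, NewWithFreeListG), a brief usage sketch may help; it assumes Go 1.18 or newer and this vendored version of github.com/google/btree, with values chosen only for illustration.

    package main

    import (
    	"fmt"

    	"github.com/google/btree"
    )

    func main() {
    	// NewOrderedG relies on the built-in '<' ordering for int.
    	tr := btree.NewOrderedG[int](2)
    	for i := 0; i < 10; i++ {
    		tr.ReplaceOrInsert(i)
    	}
    	if v, ok := tr.Get(7); ok {
    		fmt.Println("found", v)
    	}
    	old, removed := tr.Delete(3)
    	fmt.Println(old, removed) // 3 true
    }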
+func (s *items[T]) truncate(index int) { + var toClear items[T] + *s, toClear = (*s)[:index], (*s)[index:] + var zero T + for i := 0; i < len(toClear); i++ { + toClear[i] = zero + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return less(item, s[i]) + }) + if i > 0 && !less(s[i-1], item) { + return i - 1, true + } + return i, false +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node[T any] struct { + items items[T] + children items[*node[T]] + cow *copyOnWriteContext[T] +} + +func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items[T], len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(items[*node[T]], len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node[T]) mutableChild(i int) *node[T] { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node[T]) split(i int) (T, *node[T]) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node[T]) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) { + i, found := n.items.find(item, n.cow.less) + if found { + out := n.items[i] + n.items[i] = item + return out, true + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case n.cow.less(item, inTree): + // no change, we want first split node + case n.cow.less(inTree, item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out, true + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. 
+func (n *node[T]) get(key T) (_ T, _ bool) { + i, found := n.items.find(key, n.cow.less) + if found { + return n.items[i], true + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return +} + +// min returns the first item in the subtree. +func min[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return + } + return n.items[0], true +} + +// max returns the last item in the subtree. +func max[T any](n *node[T]) (_ T, found bool) { + if n == nil { + return + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return + } + return n.items[len(n.items)-1], true +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop(), true + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(item, n.cow.less) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i), true + } + return + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + var zero T + n.items[i], _ = child.remove(zero, minItems, removeMax) + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
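// Illustrative sketch (not part of the upstream source): the three toRemove
// modes above surface through the public Delete, DeleteMin and DeleteMax
// methods defined later in this file. A minimal walk-through, assuming the
// snippet sits alongside the package.
func exampleRemoveModesSketch() {
	t := NewOrderedG[int](2)
	for _, v := range []int{5, 1, 9, 3} {
		t.ReplaceOrInsert(v)
	}
	smallest, _ := t.DeleteMin() // removeMin: returns 1
	largest, _ := t.DeleteMax()  // removeMax: returns 9
	exact, ok := t.Delete(3)     // removeItem: returns 3, ok == true
	_, _, _, _ = smallest, largest, exact, ok
}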
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +type optionalItem[T any] struct { + item T + valid bool +} + +func optional[T any](item T) optionalItem[T] { + return optionalItem[T]{item: item, valid: true} +} +func empty[T any]() optionalItem[T] { + return optionalItem[T]{} +} + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. 
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start.valid { + index, _ = n.items.find(start.item, n.cow.less) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) { + hit = true + continue + } + hit = true + if stop.valid && !n.cow.less(n.items[i], stop.item) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start.valid { + index, found = n.items.find(start.item, n.cow.less) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start.valid && !n.cow.less(n.items[i], start.item) { + if !includeStart || hit || n.cow.less(start.item, n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop.valid && !n.cow.less(stop.item, n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// print is used for testing/debugging purposes. +func (n *node[T]) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTreeG is a generic implementation of a B-Tree. +// +// BTreeG stores items of type T in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTreeG[T any] struct { + degree int + length int + root *node[T] + cow *copyOnWriteContext[T] +} + +// LessFunc[T] determines how to order a type 'T'. It should implement a strict +// ordering, and should return true if within that ordering, 'a' < 'b'. +type LessFunc[T any] func(a, b T) bool + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext[T any] struct { + freelist *FreeListG[T] + less LessFunc[T] +} + +// Clone clones the btree, lazily. 
Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTreeG[T]) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTreeG[T]) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext[T]) newNode() (n *node[T]) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). +func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned, +// and the second return value is true. Otherwise, (zeroValue, false) +// +// nil cannot be added to the tree (will panic). +func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) { + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out, outb := t.root.insert(item, t.maxItems()) + if !outb { + t.length++ + } + return out, outb +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) Delete(item T) (T, bool) { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMin() (T, bool) { + var zero T + return t.deleteItem(zero, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. 
+// If no such item exists, returns (zeroValue, false). +func (t *BTreeG[T]) DeleteMax() (T, bool) { + var zero T + return t.deleteItem(zero, removeMax) +} + +func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) { + if t.root == nil || len(t.root.items) == 0 { + return + } + t.root = t.root.mutableFor(t.cow) + out, outb := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if outb { + t.length-- + } + return out, outb +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) { + if t.root == nil { + return + } + t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns +// (zeroValue, false) if unable to find that item. 
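// Illustrative sketch (not part of the upstream source): the interval
// conventions of the range iterators above. AscendRange is half-open
// [greaterOrEqual, lessThan); DescendRange is [lessOrEqual, greaterThan).
// Assumes the snippet sits alongside the package; "fmt" is already imported
// in this file.
func exampleRangesSketch() {
	t := NewOrderedG[int](2)
	for i := 1; i <= 5; i++ {
		t.ReplaceOrInsert(i)
	}
	t.AscendRange(2, 4, func(v int) bool {
		fmt.Println(v) // prints 2, then 3 (4 is excluded)
		return true    // returning false would stop the iteration early
	})
	t.DescendRange(4, 2, func(v int) bool {
		fmt.Println(v) // prints 4, then 3 (2 is excluded)
		return true
	})
}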
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) { + if t.root == nil { + return + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Min() (_ T, _ bool) { + return min(t.root) +} + +// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty. +func (t *BTreeG[T]) Max() (_ T, _ bool) { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTreeG[T]) Has(key T) bool { + _, ok := t.Get(key) + return ok +} + +// Len returns the number of items currently in the tree. +func (t *BTreeG[T]) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTreeG[T]) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. +func (n *node[T]) reset(c *copyOnWriteContext[T]) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree BTreeG[Item] + +var itemLess LessFunc[Item] = func(a, b Item) bool { + return a.Less(b) +} + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return (*BTree)(NewG[Item](degree, itemLess)) +} + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList FreeListG[Item] + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. 
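// Illustrative sketch (not part of the upstream source): the non-generic BTree
// wrapper below keeps the original Item-based API. Values are stored as the
// Item interface, so callers type-assert on the way out. Assumes the snippet
// sits alongside the package.
func exampleItemAPISketch() {
	t := New(2)
	t.ReplaceOrInsert(Int(7))
	t.ReplaceOrInsert(Int(3))
	if got := t.Get(Int(7)); got != nil {
		n := int(got.(Int)) // type-assert back to the concrete Int type
		_ = n
	}
	smallest, largest := t.Min(), t.Max() // Int(3) and Int(7) for this tree
	_, _ = smallest, largest
}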
+func NewFreeList(size int) *FreeList { + return (*FreeList)(NewFreeListG[Item](size)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f))) +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator ItemIteratorG[Item] + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + return (*BTree)((*BTreeG[Item])(t).Clone()) +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + i, _ := (*BTreeG[Item])(t).Delete(item) + return i +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + i, _ := (*BTreeG[Item])(t).DeleteMax() + return i +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + i, _ := (*BTreeG[Item])(t).DeleteMin() + return i +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + i, _ := (*BTreeG[Item])(t).Get(key) + return i +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + i, _ := (*BTreeG[Item])(t).Max() + return i +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + i, _ := (*BTreeG[Item])(t).Min() + return i +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return (*BTreeG[Item])(t).Has(key) +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item) + return i +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator)) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. 
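// Illustrative sketch (not part of the upstream source): Clone is lazy
// copy-on-write, so it is cheap up front and the two trees only start copying
// nodes as they diverge. Assumes the snippet sits alongside the package.
func exampleCloneSketch() {
	t1 := NewOrderedG[int](2)
	t1.ReplaceOrInsert(1)

	t2 := t1.Clone()      // O(1); nodes are shared until modified
	t2.ReplaceOrInsert(2) // copy-on-write copies only the touched path

	_, inT1 := t1.Get(2) // false: t1 is unaffected by writes to t2
	_, inT2 := t2.Get(1) // true: shared history is visible in the clone
	_, _ = inT1, inT2
}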
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator)) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator)) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator)) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range [last, pivot), until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator)) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator)) +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return (*BTreeG[Item])(t).Len() +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. 
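// Illustrative sketch (not part of the upstream source): the trade-off the
// Clear documentation above describes. Clear(true) recycles the tree's nodes
// into its freelist so a later refill avoids re-allocating them; Clear(false)
// is a single pointer drop that leaves the old nodes to the garbage collector.
// Assumes the snippet sits alongside the package.
func exampleClearSketch() {
	t := NewOrderedG[int](16)
	for i := 0; i < 1000; i++ {
		t.ReplaceOrInsert(i)
	}
	t.Clear(true) // keep nodes around for reuse by this tree
	for i := 0; i < 1000; i++ {
		t.ReplaceOrInsert(i) // largely served from the freelist now
	}
	_ = t.Len()
}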
+func (t *BTree) Clear(addNodesToFreelist bool) { + (*BTreeG[Item])(t).Clear(addNodesToFreelist) +} diff --git a/vendor/github.com/google/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic/jsonschema/display.go index 028a760a91..8677ed49a0 100644 --- a/vendor/github.com/google/gnostic/jsonschema/display.go +++ b/vendor/github.com/google/gnostic/jsonschema/display.go @@ -46,8 +46,23 @@ func (schema *Schema) describeSchema(indent string) string { if schema.Schema != nil { result += indent + "$schema: " + *(schema.Schema) + "\n" } + if schema.ReadOnly != nil && *schema.ReadOnly { + result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly)) + } if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema#": + fallthrough + case "#": + fallthrough + case "": + result += indent + "id: " + *(schema.ID) + "\n" + default: + result += indent + "$id: " + *(schema.ID) + "\n" + } } if schema.MultipleOf != nil { result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) diff --git a/vendor/github.com/google/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic/jsonschema/models.go index 4781bdc5f5..0d877249ab 100644 --- a/vendor/github.com/google/gnostic/jsonschema/models.go +++ b/vendor/github.com/google/gnostic/jsonschema/models.go @@ -23,9 +23,11 @@ import "gopkg.in/yaml.v3" // All fields are pointers and are nil if the associated values // are not specified. type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + ReadOnly *bool + WriteOnly *bool // http://json-schema.org/latest/json-schema-validation.html // 5.1. 
Validation keywords for numeric instances (number and integer) diff --git a/vendor/github.com/google/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic/jsonschema/reader.go index b8583d4660..a909a34128 100644 --- a/vendor/github.com/google/gnostic/jsonschema/reader.go +++ b/vendor/github.com/google/gnostic/jsonschema/reader.go @@ -165,7 +165,6 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema { default: fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil } return nil diff --git a/vendor/github.com/google/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic/jsonschema/writer.go index 340dc5f933..15b1f90506 100644 --- a/vendor/github.com/google/gnostic/jsonschema/writer.go +++ b/vendor/github.com/google/gnostic/jsonschema/writer.go @@ -16,6 +16,7 @@ package jsonschema import ( "fmt" + "strings" "gopkg.in/yaml.v3" ) @@ -33,7 +34,11 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) { value := node.Content[i+1] switch value.Kind { case yaml.ScalarNode: - result += "\"" + value.Value + "\"" + if value.Tag == "!!bool" { + result += value.Value + } else { + result += "\"" + value.Value + "\"" + } case yaml.MappingNode: result += renderMappingNode(value, innerIndent) case yaml.SequenceNode: @@ -58,7 +63,11 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) { item := node.Content[i] switch item.Kind { case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" + if item.Tag == "!!bool" { + result += innerIndent + item.Value + } else { + result += innerIndent + "\"" + item.Value + "\"" + } case yaml.MappingNode: result += innerIndent + renderMappingNode(item, innerIndent) + "" default: @@ -260,11 +269,26 @@ func (schema *Schema) nodeValue() *yaml.Node { content = appendPair(content, "title", nodeForString(*schema.Title)) } if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema": + fallthrough + case "#": + fallthrough + case "": + content = appendPair(content, "id", nodeForString(*schema.ID)) + default: + content = appendPair(content, "$id", nodeForString(*schema.ID)) + } } if schema.Schema != nil { content = appendPair(content, "$schema", nodeForString(*schema.Schema)) } + if schema.ReadOnly != nil && *schema.ReadOnly { + content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly)) + } if schema.Type != nil { content = appendPair(content, "type", schema.Type.nodeValue()) } diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go index 0f17907667..28c2777d51 100644 --- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go @@ -7887,7 +7887,12 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git 
a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go index 5f4a7025ea..d54a84db7c 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go @@ -8560,7 +8560,12 @@ func (m *Strings) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go index 499e7f932d..90a56f5526 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -6760,12 +6760,13 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, + 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto index 1be335b89b..7aede5ed90 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. 
-option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic/openapiv3/README.md index 5ee12d92e2..83603b82aa 100644 --- a/vendor/github.com/google/gnostic/openapiv3/README.md +++ b/vendor/github.com/google/gnostic/openapiv3/README.md @@ -19,3 +19,7 @@ for OpenAPI. The schema-generator directory contains support code which generates openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). + +### How to rebuild + +`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto` \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go new file mode 100644 index 0000000000..ae242f3043 --- /dev/null +++ b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go @@ -0,0 +1,183 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.4 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. 
+var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/vendor/github.com/google/gnostic/openapiv3/annotations.proto new file mode 100644 index 0000000000..0bd87810db --- /dev/null +++ b/vendor/github.com/google/gnostic/openapiv3/annotations.proto @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package openapi.v3; + +import "openapiv3/OpenAPIv3.proto"; +import "google/protobuf/descriptor.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "AnnotationsProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; + +extend google.protobuf.FileOptions { + Document document = 1143; +} + +extend google.protobuf.MethodOptions { + Operation operation = 1143; +} + +extend google.protobuf.MessageOptions { + Schema schema = 1143; +} + +extend google.protobuf.FieldOptions { + Schema property = 1143; +} \ No newline at end of file diff --git a/vendor/github.com/google/go-github/v45/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v45/github/orgs_audit_log.go deleted file mode 100644 index 52bacfed9a..0000000000 --- a/vendor/github.com/google/go-github/v45/github/orgs_audit_log.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2021 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// GetAuditLogOptions sets up optional parameters to query audit-log endpoint. -type GetAuditLogOptions struct { - Phrase *string `url:"phrase,omitempty"` // A search phrase. (Optional.) - Include *string `url:"include,omitempty"` // Event type includes. Can be one of "web", "git", "all". Default: "web". (Optional.) - Order *string `url:"order,omitempty"` // The order of audit log events. Can be one of "asc" or "desc". Default: "desc". (Optional.) - - ListCursorOptions -} - -// HookConfig describes metadata about a webhook configuration. -type HookConfig struct { - ContentType *string `json:"content_type,omitempty"` - InsecureSSL *string `json:"insecure_ssl,omitempty"` - URL *string `json:"url,omitempty"` - - // Secret is returned obfuscated by GitHub, but it can be set for outgoing requests. - Secret *string `json:"secret,omitempty"` -} - -// AuditEntry describes the fields that may be represented by various audit-log "action" entries. -// For a list of actions see - https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/reviewing-the-audit-log-for-your-organization#audit-log-actions -type AuditEntry struct { - Action *string `json:"action,omitempty"` // The name of the action that was performed, for example `user.login` or `repo.create`. 
- Active *bool `json:"active,omitempty"` - ActiveWas *bool `json:"active_was,omitempty"` - Actor *string `json:"actor,omitempty"` // The actor who performed the action. - BlockedUser *string `json:"blocked_user,omitempty"` - Business *string `json:"business,omitempty"` - CancelledAt *Timestamp `json:"cancelled_at,omitempty"` - CompletedAt *Timestamp `json:"completed_at,omitempty"` - Conclusion *string `json:"conclusion,omitempty"` - Config *HookConfig `json:"config,omitempty"` - ConfigWas *HookConfig `json:"config_was,omitempty"` - ContentType *string `json:"content_type,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - DeployKeyFingerprint *string `json:"deploy_key_fingerprint,omitempty"` - DocumentID *string `json:"_document_id,omitempty"` - Emoji *string `json:"emoji,omitempty"` - EnvironmentName *string `json:"environment_name,omitempty"` - Event *string `json:"event,omitempty"` - Events []string `json:"events,omitempty"` - EventsWere []string `json:"events_were,omitempty"` - Explanation *string `json:"explanation,omitempty"` - Fingerprint *string `json:"fingerprint,omitempty"` - HeadBranch *string `json:"head_branch,omitempty"` - HeadSHA *string `json:"head_sha,omitempty"` - HookID *int64 `json:"hook_id,omitempty"` - IsHostedRunner *bool `json:"is_hosted_runner,omitempty"` - JobName *string `json:"job_name,omitempty"` - LimitedAvailability *bool `json:"limited_availability,omitempty"` - Message *string `json:"message,omitempty"` - Name *string `json:"name,omitempty"` - OldUser *string `json:"old_user,omitempty"` - OpenSSHPublicKey *string `json:"openssh_public_key,omitempty"` - Org *string `json:"org,omitempty"` - PreviousVisibility *string `json:"previous_visibility,omitempty"` - ReadOnly *string `json:"read_only,omitempty"` - Repo *string `json:"repo,omitempty"` - Repository *string `json:"repository,omitempty"` - RepositoryPublic *bool `json:"repository_public,omitempty"` - RunnerGroupID *int64 `json:"runner_group_id,omitempty"` - RunnerGroupName *string `json:"runner_group_name,omitempty"` - RunnerID *int64 `json:"runner_id,omitempty"` - RunnerLabels []string `json:"runner_labels,omitempty"` - RunnerName *string `json:"runner_name,omitempty"` - SecretsPassed []string `json:"secrets_passed,omitempty"` - SourceVersion *string `json:"source_version,omitempty"` - StartedAt *Timestamp `json:"started_at,omitempty"` - TargetLogin *string `json:"target_login,omitempty"` - TargetVersion *string `json:"target_version,omitempty"` - Team *string `json:"team,omitempty"` - Timestamp *Timestamp `json:"@timestamp,omitempty"` // The time the audit log event occurred, given as a [Unix timestamp](http://en.wikipedia.org/wiki/Unix_time). - TransportProtocolName *string `json:"transport_protocol_name,omitempty"` // A human readable name for the protocol (for example, HTTP or SSH) used to transfer Git data. - TransportProtocol *int `json:"transport_protocol,omitempty"` // The type of protocol (for example, HTTP=1 or SSH=2) used to transfer Git data. - TriggerID *int64 `json:"trigger_id,omitempty"` - User *string `json:"user,omitempty"` // The user that was affected by the action performed (if available). - Visibility *string `json:"visibility,omitempty"` // The repository visibility, for example `public` or `private`. - WorkflowID *int64 `json:"workflow_id,omitempty"` - WorkflowRunID *int64 `json:"workflow_run_id,omitempty"` -} - -// GetAuditLog gets the audit-log entries for an organization. 
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization -func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) { - u := fmt.Sprintf("orgs/%v/audit-log", org) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var auditEntries []*AuditEntry - resp, err := s.client.Do(ctx, req, &auditEntries) - if err != nil { - return nil, resp, err - } - - return auditEntries, resp, nil -} diff --git a/vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go b/vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go deleted file mode 100644 index 9904685b94..0000000000 --- a/vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// OrganizationCustomRepoRoles represents custom repository roles available in specified organization. -type OrganizationCustomRepoRoles struct { - TotalCount *int `json:"total_count,omitempty"` - CustomRepoRoles []*CustomRepoRoles `json:"custom_roles,omitempty"` -} - -// CustomRepoRoles represents custom repository roles for an organization. -// See https://docs.github.com/en/enterprise-cloud@latest/organizations/managing-peoples-access-to-your-organization-with-roles/managing-custom-repository-roles-for-an-organization -// for more information. -type CustomRepoRoles struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` -} - -// ListCustomRepoRoles lists the custom repository roles available in this organization. -// In order to see custom repository roles in an organization, the authenticated user must be an organization owner. 
-// -// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#list-custom-repository-roles-in-an-organization -func (s *OrganizationsService) ListCustomRepoRoles(ctx context.Context, org string) (*OrganizationCustomRepoRoles, *Response, error) { - u := fmt.Sprintf("orgs/%v/custom_roles", org) - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - customRepoRoles := new(OrganizationCustomRepoRoles) - resp, err := s.client.Do(ctx, req, customRepoRoles) - if err != nil { - return nil, resp, err - } - - return customRepoRoles, resp, nil -} diff --git a/vendor/github.com/google/go-github/v45/AUTHORS b/vendor/github.com/google/go-github/v53/AUTHORS similarity index 82% rename from vendor/github.com/google/go-github/v45/AUTHORS rename to vendor/github.com/google/go-github/v53/AUTHORS index 80bd26dd4f..5e40cd1f38 100644 --- a/vendor/github.com/google/go-github/v45/AUTHORS +++ b/vendor/github.com/google/go-github/v53/AUTHORS @@ -11,7 +11,10 @@ 178inaba 2BFL 413x +Abed Kibbe Abhinav Gupta +Abhishek Veeramalla +aboy adrienzieba afdesk Ahmed Hagy @@ -24,15 +27,18 @@ Alec Thomas Aleks Clark Alex Bramley Alex Orr +Alex Su Alex Unger Alexander Harkness Alexis Gauthiez Ali Farooq +Allan Guwatudde Allen Sun Amey Sakhadeo Anders Janmyr Andreas Garnæs Andrew Ryabchun +Andrew Svoboda Andy Grunwald Andy Hume Andy Lindeman @@ -58,20 +64,26 @@ Beshr Kayali Beyang Liu Billy Keyes Billy Lynch +Bjorn Neergaard Björn Häuser boljen +Bracken Brad Harris Brad Moylan Bradley Falzon Bradley McAllister +Brandon Butler Brandon Cook +Brett Kuhlman Brett Logan Brian Egizi Bryan Boreham +Bryan Peterson Cami Diez Carl Johnson Carlos Alexandro Becker Carlos Tadeu Panato Junior +ChandanChainani chandresh-pancholi Charles Fenwick Elliott Charlie Yan @@ -82,12 +94,17 @@ Chris Raborg Chris Roche Chris Schaefer chrisforrette +Christian Bargmann Christian Muehlhaeuser Christoph Sassenberg +CI Monk Colin Misare +Craig Gumbley Craig Peterson Cristian Maglie +Cyb3r Jak3 Daehyeok Mun +Dalton Hubble Daniel Lanner Daniel Leavitt Daniel Nilsson @@ -97,6 +114,7 @@ Dave Henderson Dave Perrett Dave Protasowski David Deng +David Gamba David J. M. 
Karlsen David Jannotta David Ji @@ -104,6 +122,7 @@ David Lopez Reyes Davide Zipeto Dennis Webb Derek Jobst +DeviousLab Dhi Aurrahman Diego Lapiduz Dmitri Shuralyov @@ -116,6 +135,8 @@ Eivind Eli Uriegas Elliott Beach Emerson Wood +Emil V +Eng Zer Jun eperm Erick Fejta Erik Nobel @@ -123,13 +144,17 @@ erwinvaneyk Evan Elias Fabian Holler Fabrice +Fatema-Moaiyadi Felix Geisendörfer Filippo Valsorda Florian Forster +Florian Wagner Francesc Gil Francis Francisco Guimarães +François de Metz Fredrik Jönsson +Gabriel Garrett Squire George Kontridze Georgy Buranov @@ -143,11 +168,15 @@ Guz Alexander Guðmundur Bjarni Ólafsson Hanno Hecker Hari haran +Harikesh00 haya14busa haya14busa +Hiroki Ito +Hubot Jr Huy Tr huydx i2bskn +Iain Steers Ikko Ashimine Ioannis Georgoulas Isao Jonas @@ -157,8 +186,10 @@ Jacob Valdemar Jake Krammer Jake White Jameel Haffejee +James Bowes James Cockbain James Loh +Jamie West Jan Kosecki Jan Å vábík Javier Campanini @@ -168,15 +199,18 @@ Jeremy Morris Jesse Haka Jesse Newland Jihoon Chung +Jille Timmermans Jimmi Dyson Joan Saum Joe Tsai John Barton John Engelman +John Jones John Liu Jordan Brockopp Jordan Sussman Joshua Bezaleel Abednego +João Cerqueira JP Phillips jpbelanger-mtl Juan @@ -185,25 +219,34 @@ Julien Garcia Gonzalez Julien Rostand Junya Kono Justin Abrahms +Justin Toh Jusung Lee jzhoucliqr +k1rnt kadern0 Katrina Owen Kautilya Tripathi Keita Urashima Kevin Burke +Kevin Wang +Kevin Zhao Kirill Konrad Malawski Kookheon Kwon +Krishna Indani Krzysztof Kowalczyk Kshitij Saraogi Kumar Saurabh +Kyle Kurz kyokomi Laurent Verdoïa +leopoldwang Liam Galvin +Lluis Campos Lovro Mažgon Luca Campese Lucas Alcantara +Luis Davim Luke Evers Luke Kysow Luke Roberts @@ -219,6 +262,7 @@ Martins Sipenko Marwan Sulaiman Masayuki Izumi Mat Geist +Matija Horvat Matin Rahmanian Matt Matt Brender @@ -226,12 +270,17 @@ Matt Gaunt Matt Landis Matt Moore Maxime Bury +Michael Meng Michael Spiegel Michael Tiller MichaÅ‚ Glapa Michelangelo Morrillo +Miguel Elias dos Santos +Mike Chen +Mohammed AlDujaili Mukundan Senthil Munia Balayil +Mustafa Abban Nadav Kaner Nathan VanBenschoten Navaneeth Suresh @@ -241,6 +290,7 @@ Nick Platt Nick Spragg Nikhita Raghunath Nilesh Singh +Noah Hanjun Lee Noah Zoschke ns-cweber Ole Orhagen @@ -251,13 +301,17 @@ Pablo Pérez Schröder Palash Nigam Panagiotis Moustafellos Parham Alvani +pari-27 Parker Moore parkhyukjun89 +Pat Alwell Patrick DeVivo Patrick Marabeas +Pavel Dvoinos Pavel Shtanko Pete Wagner Petr Shevtsov +Pierce McEntagart Pierre Carrier Piotr Zurek Piyush Chugh @@ -271,18 +325,23 @@ Radek Simko RadliÅ„ski Ignacy Rajat Jindal Rajendra arora +Rajkumar Ranbir Singh Ravi Shekhar Jethani RaviTeja Pothana rc1140 Red Hat, Inc. 
Reetuparna Mukherjee +reeves122 Reinier Timmer Renjith R Ricco Førgaard +Richard de Vries Rob Figueiredo Rohit Upadhyay +Rojan Dinc Ronak Jain +Ronan Pelliard Ross Gustafson Ruben Vereecken Russell Boley @@ -298,10 +357,13 @@ Sandeep Sukhani Sander Knape Sander van Harmelen Sanket Payghan +Sarah Funkhouser Sarasa Kisaragi +Sasha Melentyev Sean Wang Sebastian Mandrean Sebastian Mæland Pedersen +Sergei Popinevskii Sergey Romanov Sergio Garcia Seth Vargo @@ -311,6 +373,7 @@ shakeelrao Shawn Catanzarite Shawn Smith Shibasis Patel +Sho Okada Shrikrishna Singh Simon Davis sona-tar @@ -321,18 +384,27 @@ Stefan Sedich Steve Teuber Stian Eikeland Suhaib Mujahid +sushmita wable Szymon Kodrebski Søren Hansen +Takashi Yoneuchi Takayuki Watanabe Taketoshi Fujiwara Taketoshi Fujiwara +Takuma Kajikawa Tasya Aditya Rukmana Theo Henson +Theofilos Petsios Thomas Aidan Curran Thomas Bruyelle +Tim Rogers Timothée Peignier +Tingluo Huang tkhandel +Tobias Gesellchen +Tom Payne Trey Tacon +tsbkw ttacon Vaibhav Singh Varadarajan Aravamudhan @@ -346,12 +418,16 @@ Will Maier Willem D'Haeseleer William Bailey William Cooke +Xabi xibz Yann Malet Yannick Utard Yicheng Qin Yosuke Akatsuka Yumikiyo Osanai +Yusef Mohamadi Yusuke Kuoka Zach Latta -zhouhaibing089 \ No newline at end of file +zhouhaibing089 +六开箱 +缘生 diff --git a/vendor/github.com/google/go-github/v45/LICENSE b/vendor/github.com/google/go-github/v53/LICENSE similarity index 100% rename from vendor/github.com/google/go-github/v45/LICENSE rename to vendor/github.com/google/go-github/v53/LICENSE diff --git a/vendor/github.com/google/go-github/v45/github/actions.go b/vendor/github.com/google/go-github/v53/github/actions.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/actions.go rename to vendor/github.com/google/go-github/v53/github/actions.go diff --git a/vendor/github.com/google/go-github/v45/github/actions_artifacts.go b/vendor/github.com/google/go-github/v53/github/actions_artifacts.go similarity index 75% rename from vendor/github.com/google/go-github/v45/github/actions_artifacts.go rename to vendor/github.com/google/go-github/v53/github/actions_artifacts.go index 3b9c83c490..441a53910e 100644 --- a/vendor/github.com/google/go-github/v45/github/actions_artifacts.go +++ b/vendor/github.com/google/go-github/v53/github/actions_artifacts.go @@ -12,20 +12,34 @@ import ( "net/url" ) -// Artifact reprents a GitHub artifact. Artifacts allow sharing +// ArtifactWorkflowRun represents a GitHub artifact's workflow run. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts +type ArtifactWorkflowRun struct { + ID *int64 `json:"id,omitempty"` + RepositoryID *int64 `json:"repository_id,omitempty"` + HeadRepositoryID *int64 `json:"head_repository_id,omitempty"` + HeadBranch *string `json:"head_branch,omitempty"` + HeadSHA *string `json:"head_sha,omitempty"` +} + +// Artifact represents a GitHub artifact. Artifacts allow sharing // data between jobs in a workflow and provide storage for data // once a workflow is complete. 
// // GitHub API docs: https://docs.github.com/en/rest/actions/artifacts type Artifact struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - SizeInBytes *int64 `json:"size_in_bytes,omitempty"` - ArchiveDownloadURL *string `json:"archive_download_url,omitempty"` - Expired *bool `json:"expired,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - ExpiresAt *Timestamp `json:"expires_at,omitempty"` + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Name *string `json:"name,omitempty"` + SizeInBytes *int64 `json:"size_in_bytes,omitempty"` + URL *string `json:"url,omitempty"` + ArchiveDownloadURL *string `json:"archive_download_url,omitempty"` + Expired *bool `json:"expired,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + ExpiresAt *Timestamp `json:"expires_at,omitempty"` + WorkflowRun *ArtifactWorkflowRun `json:"workflow_run,omitempty"` } // ArtifactList represents a list of GitHub artifacts. @@ -121,6 +135,10 @@ func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo strin } parsedURL, err := url.Parse(resp.Header.Get("Location")) + if err != nil { + return nil, newResponse(resp), err + } + return parsedURL, newResponse(resp), nil } diff --git a/vendor/github.com/google/go-github/v53/github/actions_cache.go b/vendor/github.com/google/go-github/v53/github/actions_cache.go new file mode 100644 index 0000000000..9592d9ab62 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/actions_cache.go @@ -0,0 +1,235 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// ActionsCache represents a GitHub action cache. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#about-the-cache-api +type ActionsCache struct { + ID *int64 `json:"id,omitempty" url:"-"` + Ref *string `json:"ref,omitempty" url:"ref"` + Key *string `json:"key,omitempty" url:"key"` + Version *string `json:"version,omitempty" url:"-"` + LastAccessedAt *Timestamp `json:"last_accessed_at,omitempty" url:"-"` + CreatedAt *Timestamp `json:"created_at,omitempty" url:"-"` + SizeInBytes *int64 `json:"size_in_bytes,omitempty" url:"-"` +} + +// ActionsCacheList represents a list of GitHub actions Cache. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-github-actions-caches-for-a-repository +type ActionsCacheList struct { + TotalCount int `json:"total_count"` + ActionsCaches []*ActionsCache `json:"actions_caches,omitempty"` +} + +// ActionsCacheUsage represents a GitHub Actions Cache Usage object. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-a-repository +type ActionsCacheUsage struct { + FullName string `json:"full_name"` + ActiveCachesSizeInBytes int64 `json:"active_caches_size_in_bytes"` + ActiveCachesCount int `json:"active_caches_count"` +} + +// ActionsCacheUsageList represents a list of repositories with GitHub Actions cache usage for an organization. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-a-repository +type ActionsCacheUsageList struct { + TotalCount int `json:"total_count"` + RepoCacheUsage []*ActionsCacheUsage `json:"repository_cache_usages,omitempty"` +} + +// TotalCacheUsage represents total GitHub actions cache usage of an organization or enterprise. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-an-enterprise +type TotalCacheUsage struct { + TotalActiveCachesUsageSizeInBytes int64 `json:"total_active_caches_size_in_bytes"` + TotalActiveCachesCount int `json:"total_active_caches_count"` +} + +// ActionsCacheListOptions represents a list of all possible optional Query parameters for ListCaches method. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-github-actions-caches-for-a-repository +type ActionsCacheListOptions struct { + ListOptions + // The Git reference for the results you want to list. + // The ref for a branch can be formatted either as refs/heads/ + // or simply . To reference a pull request use refs/pull//merge + Ref *string `url:"ref,omitempty"` + Key *string `url:"key,omitempty"` + // Can be one of: "created_at", "last_accessed_at", "size_in_bytes". Default: "last_accessed_at" + Sort *string `url:"sort,omitempty"` + // Can be one of: "asc", "desc" Default: desc + Direction *string `url:"direction,omitempty"` +} + +// ListCaches lists the GitHub Actions caches for a repository. +// You must authenticate using an access token with the repo scope to use this endpoint. +// +// Permissions: must have the actions:read permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-github-actions-caches-for-a-repository +func (s *ActionsService) ListCaches(ctx context.Context, owner, repo string, opts *ActionsCacheListOptions) (*ActionsCacheList, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/caches", owner, repo) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + actionCacheList := new(ActionsCacheList) + resp, err := s.client.Do(ctx, req, actionCacheList) + if err != nil { + return nil, resp, err + } + + return actionCacheList, resp, nil +} + +// DeleteCachesByKey deletes one or more GitHub Actions caches for a repository, using a complete cache key. +// By default, all caches that match the provided key are deleted, but you can optionally provide +// a Git ref to restrict deletions to caches that match both the provided key and the Git ref. +// The ref for a branch can be formatted either as "refs/heads/" or simply "". +// To reference a pull request use "refs/pull//merge". If you don't want to use ref just pass nil in parameter. +// +// Permissions: You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have the actions:write permission to use this endpoint. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-github-actions-caches-for-a-repository-using-a-cache-key +func (s *ActionsService) DeleteCachesByKey(ctx context.Context, owner, repo, key string, ref *string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/caches", owner, repo) + u, err := addOptions(u, ActionsCache{Key: &key, Ref: ref}) + if err != nil { + return nil, err + } + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// DeleteCachesByID deletes a GitHub Actions cache for a repository, using a cache ID. +// +// Permissions: You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have the actions:write permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#delete-a-github-actions-cache-for-a-repository-using-a-cache-id +func (s *ActionsService) DeleteCachesByID(ctx context.Context, owner, repo string, cacheID int64) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/caches/%v", owner, repo, cacheID) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// GetCacheUsageForRepo gets GitHub Actions cache usage for a repository. The data fetched using this API is refreshed approximately every 5 minutes, +// so values returned from this endpoint may take at least 5 minutes to get updated. +// +// Permissions: Anyone with read access to the repository can use this endpoint. If the repository is private, you must use an +// access token with the repo scope. GitHub Apps must have the actions:read permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-a-repository +func (s *ActionsService) GetCacheUsageForRepo(ctx context.Context, owner, repo string) (*ActionsCacheUsage, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/cache/usage", owner, repo) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + cacheUsage := new(ActionsCacheUsage) + res, err := s.client.Do(ctx, req, cacheUsage) + if err != nil { + return nil, res, err + } + + return cacheUsage, res, err +} + +// ListCacheUsageByRepoForOrg lists repositories and their GitHub Actions cache usage for an organization. The data fetched using this API is +// refreshed approximately every 5 minutes, so values returned from this endpoint may take at least 5 minutes to get updated. +// +// Permissions: You must authenticate using an access token with the read:org scope to use this endpoint. +// GitHub Apps must have the organization_admistration:read permission to use this endpoint. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#list-repositories-with-github-actions-cache-usage-for-an-organization +func (s *ActionsService) ListCacheUsageByRepoForOrg(ctx context.Context, org string, opts *ListOptions) (*ActionsCacheUsageList, *Response, error) { + u := fmt.Sprintf("orgs/%v/actions/cache/usage-by-repository", org) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + cacheUsage := new(ActionsCacheUsageList) + res, err := s.client.Do(ctx, req, cacheUsage) + if err != nil { + return nil, res, err + } + + return cacheUsage, res, err +} + +// GetTotalCacheUsageForOrg gets the total GitHub Actions cache usage for an organization. The data fetched using this API is refreshed approximately every +// 5 minutes, so values returned from this endpoint may take at least 5 minutes to get updated. +// +// Permissions: You must authenticate using an access token with the read:org scope to use this endpoint. +// GitHub Apps must have the organization_admistration:read permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-an-organization +func (s *ActionsService) GetTotalCacheUsageForOrg(ctx context.Context, org string) (*TotalCacheUsage, *Response, error) { + u := fmt.Sprintf("orgs/%v/actions/cache/usage", org) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + cacheUsage := new(TotalCacheUsage) + res, err := s.client.Do(ctx, req, cacheUsage) + if err != nil { + return nil, res, err + } + + return cacheUsage, res, err +} + +// GetTotalCacheUsageForEnterprise gets the total GitHub Actions cache usage for an enterprise. The data fetched using this API is refreshed approximately every 5 minutes, +// so values returned from this endpoint may take at least 5 minutes to get updated. +// +// Permissions: You must authenticate using an access token with the "admin:enterprise" scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/cache?apiVersion=2022-11-28#get-github-actions-cache-usage-for-an-enterprise +func (s *ActionsService) GetTotalCacheUsageForEnterprise(ctx context.Context, enterprise string) (*TotalCacheUsage, *Response, error) { + u := fmt.Sprintf("enterprises/%v/actions/cache/usage", enterprise) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + cacheUsage := new(TotalCacheUsage) + res, err := s.client.Do(ctx, req, cacheUsage) + if err != nil { + return nil, res, err + } + + return cacheUsage, res, err +} diff --git a/vendor/github.com/google/go-github/v53/github/actions_oidc.go b/vendor/github.com/google/go-github/v53/github/actions_oidc.go new file mode 100644 index 0000000000..b7f2d26ae9 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/actions_oidc.go @@ -0,0 +1,73 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// OIDCSubjectClaimCustomTemplate represents an OIDC subject claim customization template. 
+type OIDCSubjectClaimCustomTemplate struct { + UseDefault *bool `json:"use_default,omitempty"` + IncludeClaimKeys []string `json:"include_claim_keys,omitempty"` +} + +// GetOrgOIDCSubjectClaimCustomTemplate gets the subject claim customization template for an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#get-the-customization-template-for-an-oidc-subject-claim-for-an-organization +func (s *ActionsService) GetOrgOIDCSubjectClaimCustomTemplate(ctx context.Context, org string) (*OIDCSubjectClaimCustomTemplate, *Response, error) { + u := fmt.Sprintf("orgs/%v/actions/oidc/customization/sub", org) + return s.getOIDCSubjectClaimCustomTemplate(ctx, u) +} + +// GetRepoOIDCSubjectClaimCustomTemplate gets the subject claim customization template for a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#get-the-customization-template-for-an-oidc-subject-claim-for-a-repository +func (s *ActionsService) GetRepoOIDCSubjectClaimCustomTemplate(ctx context.Context, owner, repo string) (*OIDCSubjectClaimCustomTemplate, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/oidc/customization/sub", owner, repo) + return s.getOIDCSubjectClaimCustomTemplate(ctx, u) +} + +func (s *ActionsService) getOIDCSubjectClaimCustomTemplate(ctx context.Context, url string) (*OIDCSubjectClaimCustomTemplate, *Response, error) { + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + tmpl := new(OIDCSubjectClaimCustomTemplate) + resp, err := s.client.Do(ctx, req, tmpl) + if err != nil { + return nil, resp, err + } + + return tmpl, resp, nil +} + +// SetOrgOIDCSubjectClaimCustomTemplate sets the subject claim customization for an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#set-the-customization-template-for-an-oidc-subject-claim-for-an-organization +func (s *ActionsService) SetOrgOIDCSubjectClaimCustomTemplate(ctx context.Context, org string, template *OIDCSubjectClaimCustomTemplate) (*Response, error) { + u := fmt.Sprintf("orgs/%v/actions/oidc/customization/sub", org) + return s.setOIDCSubjectClaimCustomTemplate(ctx, u, template) +} + +// SetRepoOIDCSubjectClaimCustomTemplate sets the subject claim customization for a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/oidc#set-the-customization-template-for-an-oidc-subject-claim-for-a-repository +func (s *ActionsService) SetRepoOIDCSubjectClaimCustomTemplate(ctx context.Context, owner, repo string, template *OIDCSubjectClaimCustomTemplate) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/oidc/customization/sub", owner, repo) + return s.setOIDCSubjectClaimCustomTemplate(ctx, u, template) +} + +func (s *ActionsService) setOIDCSubjectClaimCustomTemplate(ctx context.Context, url string, template *OIDCSubjectClaimCustomTemplate) (*Response, error) { + req, err := s.client.NewRequest("PUT", url, template) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go b/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go new file mode 100644 index 0000000000..3566eb9d20 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/actions_required_workflows.go @@ -0,0 +1,247 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" +) + +// OrgRequiredWorkflow represents a required workflow object at the org level. +type OrgRequiredWorkflow struct { + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Path *string `json:"path,omitempty"` + Scope *string `json:"scope,omitempty"` + Ref *string `json:"ref,omitempty"` + State *string `json:"state,omitempty"` + SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + Repository *Repository `json:"repository,omitempty"` +} + +// OrgRequiredWorkflows represents the required workflows for the org. +type OrgRequiredWorkflows struct { + TotalCount *int `json:"total_count,omitempty"` + RequiredWorkflows []*OrgRequiredWorkflow `json:"required_workflows,omitempty"` +} + +// CreateUpdateRequiredWorkflowOptions represents the input object used to create or update required workflows. +type CreateUpdateRequiredWorkflowOptions struct { + WorkflowFilePath *string `json:"workflow_file_path,omitempty"` + RepositoryID *int64 `json:"repository_id,omitempty"` + Scope *string `json:"scope,omitempty"` + SelectedRepositoryIDs *SelectedRepoIDs `json:"selected_repository_ids,omitempty"` +} + +// RequiredWorkflowSelectedRepos represents the repos that a required workflow is applied to. +type RequiredWorkflowSelectedRepos struct { + TotalCount *int `json:"total_count,omitempty"` + Repositories []*Repository `json:"repositories,omitempty"` +} + +// RepoRequiredWorkflow represents a required workflow object at the repo level. +type RepoRequiredWorkflow struct { + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Name *string `json:"name,omitempty"` + Path *string `json:"path,omitempty"` + State *string `json:"state,omitempty"` + URL *string `json:"url,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + BadgeURL *string `json:"badge_url,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + SourceRepository *Repository `json:"source_repository,omitempty"` +} + +// RepoRequiredWorkflows represents the required workflows for a repo. +type RepoRequiredWorkflows struct { + TotalCount *int `json:"total_count,omitempty"` + RequiredWorkflows []*RepoRequiredWorkflow `json:"required_workflows,omitempty"` +} + +// ListOrgRequiredWorkflows lists the RequiredWorkflows for an org. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows +func (s *ActionsService) ListOrgRequiredWorkflows(ctx context.Context, org string, opts *ListOptions) (*OrgRequiredWorkflows, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows", org) + u, err := addOptions(url, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + requiredWorkflows := new(OrgRequiredWorkflows) + resp, err := s.client.Do(ctx, req, &requiredWorkflows) + if err != nil { + return nil, resp, err + } + + return requiredWorkflows, resp, nil +} + +// CreateRequiredWorkflow creates the required workflow in an org. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#create-a-required-workflow +func (s *ActionsService) CreateRequiredWorkflow(ctx context.Context, org string, createRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows", org) + req, err := s.client.NewRequest("POST", url, createRequiredWorkflowOptions) + if err != nil { + return nil, nil, err + } + + orgRequiredWorkflow := new(OrgRequiredWorkflow) + resp, err := s.client.Do(ctx, req, orgRequiredWorkflow) + if err != nil { + return nil, resp, err + } + + return orgRequiredWorkflow, resp, nil +} + +// GetRequiredWorkflowByID get the RequiredWorkflows for an org by its ID. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-required-workflows +func (s *ActionsService) GetRequiredWorkflowByID(ctx context.Context, owner string, requiredWorkflowID int64) (*OrgRequiredWorkflow, *Response, error) { + u := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", owner, requiredWorkflowID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + requiredWorkflow := new(OrgRequiredWorkflow) + resp, err := s.client.Do(ctx, req, &requiredWorkflow) + if err != nil { + return nil, resp, err + } + + return requiredWorkflow, resp, nil +} + +// UpdateRequiredWorkflow updates a required workflow in an org. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow +func (s *ActionsService) UpdateRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64, updateRequiredWorkflowOptions *CreateUpdateRequiredWorkflowOptions) (*OrgRequiredWorkflow, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID) + req, err := s.client.NewRequest("PATCH", url, updateRequiredWorkflowOptions) + if err != nil { + return nil, nil, err + } + + orgRequiredWorkflow := new(OrgRequiredWorkflow) + resp, err := s.client.Do(ctx, req, orgRequiredWorkflow) + if err != nil { + return nil, resp, err + } + + return orgRequiredWorkflow, resp, nil +} + +// DeleteRequiredWorkflow deletes a required workflow in an org. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#update-a-required-workflow +func (s *ActionsService) DeleteRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID int64) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v", org, requiredWorkflowID) + req, err := s.client.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + return s.client.Do(ctx, req, nil) +} + +// ListRequiredWorkflowSelectedRepos lists the Repositories selected for a workflow. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-selected-repositories-for-a-required-workflow +func (s *ActionsService) ListRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, opts *ListOptions) (*RequiredWorkflowSelectedRepos, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID) + u, err := addOptions(url, opts) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + requiredWorkflowRepos := new(RequiredWorkflowSelectedRepos) + resp, err := s.client.Do(ctx, req, &requiredWorkflowRepos) + if err != nil { + return nil, resp, err + } + + return requiredWorkflowRepos, resp, nil +} + +// SetRequiredWorkflowSelectedRepos sets the Repositories selected for a workflow. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#sets-repositories-for-a-required-workflow +func (s *ActionsService) SetRequiredWorkflowSelectedRepos(ctx context.Context, org string, requiredWorkflowID int64, ids SelectedRepoIDs) (*Response, error) { + type repoIDs struct { + SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` + } + url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories", org, requiredWorkflowID) + req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// AddRepoToRequiredWorkflow adds the Repository to a required workflow. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow +func (s *ActionsService) AddRepoToRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID) + req, err := s.client.NewRequest("PUT", url, nil) + if err != nil { + return nil, err + } + return s.client.Do(ctx, req, nil) +} + +// RemoveRepoFromRequiredWorkflow removes the Repository from a required workflow. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#add-a-repository-to-a-required-workflow +func (s *ActionsService) RemoveRepoFromRequiredWorkflow(ctx context.Context, org string, requiredWorkflowID, repoID int64) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/required_workflows/%v/repositories/%v", org, requiredWorkflowID, repoID) + req, err := s.client.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + return s.client.Do(ctx, req, nil) +} + +// ListRepoRequiredWorkflows lists the RequiredWorkflows for a repo. 
+// +// Github API docs:https://docs.github.com/en/rest/actions/required-workflows?apiVersion=2022-11-28#list-repository-required-workflows +func (s *ActionsService) ListRepoRequiredWorkflows(ctx context.Context, owner, repo string, opts *ListOptions) (*RepoRequiredWorkflows, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/actions/required_workflows", owner, repo) + u, err := addOptions(url, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + requiredWorkflows := new(RepoRequiredWorkflows) + resp, err := s.client.Do(ctx, req, &requiredWorkflows) + if err != nil { + return nil, resp, err + } + + return requiredWorkflows, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/actions_runner_groups.go b/vendor/github.com/google/go-github/v53/github/actions_runner_groups.go similarity index 87% rename from vendor/github.com/google/go-github/v45/github/actions_runner_groups.go rename to vendor/github.com/google/go-github/v53/github/actions_runner_groups.go index 6d89249150..00b9b6ce09 100644 --- a/vendor/github.com/google/go-github/v45/github/actions_runner_groups.go +++ b/vendor/github.com/google/go-github/v53/github/actions_runner_groups.go @@ -12,14 +12,17 @@ import ( // RunnerGroup represents a self-hosted runner group configured in an organization. type RunnerGroup struct { - ID *int64 `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Visibility *string `json:"visibility,omitempty"` - Default *bool `json:"default,omitempty"` - SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` - RunnersURL *string `json:"runners_url,omitempty"` - Inherited *bool `json:"inherited,omitempty"` - AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Visibility *string `json:"visibility,omitempty"` + Default *bool `json:"default,omitempty"` + SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` + RunnersURL *string `json:"runners_url,omitempty"` + Inherited *bool `json:"inherited,omitempty"` + AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` + RestrictedToWorkflows *bool `json:"restricted_to_workflows,omitempty"` + SelectedWorkflows []string `json:"selected_workflows,omitempty"` + WorkflowRestrictionsReadOnly *bool `json:"workflow_restrictions_read_only,omitempty"` } // RunnerGroups represents a collection of self-hosted runner groups configured for an organization. @@ -38,13 +41,19 @@ type CreateRunnerGroupRequest struct { Runners []int64 `json:"runners,omitempty"` // If set to True, public repos can use this runner group AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` + // If true, the runner group will be restricted to running only the workflows specified in the SelectedWorkflows slice. + RestrictedToWorkflows *bool `json:"restricted_to_workflows,omitempty"` + // List of workflows the runner group should be allowed to run. This setting will be ignored unless RestrictedToWorkflows is set to true. + SelectedWorkflows []string `json:"selected_workflows,omitempty"` } // UpdateRunnerGroupRequest represents a request to update a Runner group for an organization. 
type UpdateRunnerGroupRequest struct { - Name *string `json:"name,omitempty"` - Visibility *string `json:"visibility,omitempty"` - AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` + Name *string `json:"name,omitempty"` + Visibility *string `json:"visibility,omitempty"` + AllowsPublicRepositories *bool `json:"allows_public_repositories,omitempty"` + RestrictedToWorkflows *bool `json:"restricted_to_workflows,omitempty"` + SelectedWorkflows []string `json:"selected_workflows,omitempty"` } // SetRepoAccessRunnerGroupRequest represents a request to replace the list of repositories diff --git a/vendor/github.com/google/go-github/v45/github/actions_runners.go b/vendor/github.com/google/go-github/v53/github/actions_runners.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/actions_runners.go rename to vendor/github.com/google/go-github/v53/github/actions_runners.go diff --git a/vendor/github.com/google/go-github/v45/github/actions_secrets.go b/vendor/github.com/google/go-github/v53/github/actions_secrets.go similarity index 99% rename from vendor/github.com/google/go-github/v45/github/actions_secrets.go rename to vendor/github.com/google/go-github/v53/github/actions_secrets.go index dc057edba2..316badb70d 100644 --- a/vendor/github.com/google/go-github/v45/github/actions_secrets.go +++ b/vendor/github.com/google/go-github/v53/github/actions_secrets.go @@ -186,7 +186,7 @@ func (s *ActionsService) GetEnvSecret(ctx context.Context, repoID int, env, secr return s.getSecret(ctx, url) } -// SelectedRepoIDs are the repository IDs that have access to the secret. +// SelectedRepoIDs are the repository IDs that have access to the actions secrets. type SelectedRepoIDs []int64 // EncryptedSecret represents a secret that is encrypted using a public key. diff --git a/vendor/github.com/google/go-github/v53/github/actions_variables.go b/vendor/github.com/google/go-github/v53/github/actions_variables.go new file mode 100644 index 0000000000..29445edd04 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/actions_variables.go @@ -0,0 +1,293 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// ActionsVariable represents a repository action variable. +type ActionsVariable struct { + Name string `json:"name"` + Value string `json:"value"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + Visibility *string `json:"visibility,omitempty"` + // Used by ListOrgVariables and GetOrgVariables + SelectedRepositoriesURL *string `json:"selected_repositories_url,omitempty"` + // Used by UpdateOrgVariable and CreateOrgVariable + SelectedRepositoryIDs *SelectedRepoIDs `json:"selected_repository_ids,omitempty"` +} + +// ActionsVariables represents one item from the ListVariables response. 
+type ActionsVariables struct { + TotalCount int `json:"total_count"` + Variables []*ActionsVariable `json:"variables"` +} + +func (s *ActionsService) listVariables(ctx context.Context, url string, opts *ListOptions) (*ActionsVariables, *Response, error) { + u, err := addOptions(url, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + variables := new(ActionsVariables) + resp, err := s.client.Do(ctx, req, &variables) + if err != nil { + return nil, resp, err + } + + return variables, resp, nil +} + +// ListRepoVariables lists all variables available in a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-repository-variables +func (s *ActionsService) ListRepoVariables(ctx context.Context, owner, repo string, opts *ListOptions) (*ActionsVariables, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/actions/variables", owner, repo) + return s.listVariables(ctx, url, opts) +} + +// ListOrgVariables lists all variables available in an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-organization-variables +func (s *ActionsService) ListOrgVariables(ctx context.Context, org string, opts *ListOptions) (*ActionsVariables, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables", org) + return s.listVariables(ctx, url, opts) +} + +// ListEnvVariables lists all variables available in an environment. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-environment-variables +func (s *ActionsService) ListEnvVariables(ctx context.Context, repoID int, env string, opts *ListOptions) (*ActionsVariables, *Response, error) { + url := fmt.Sprintf("repositories/%v/environments/%v/variables", repoID, env) + return s.listVariables(ctx, url, opts) +} + +func (s *ActionsService) getVariable(ctx context.Context, url string) (*ActionsVariable, *Response, error) { + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + variable := new(ActionsVariable) + resp, err := s.client.Do(ctx, req, variable) + if err != nil { + return nil, resp, err + } + + return variable, resp, nil +} + +// GetRepoVariable gets a single repository variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#get-a-repository-variable +func (s *ActionsService) GetRepoVariable(ctx context.Context, owner, repo, name string) (*ActionsVariable, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/actions/variables/%v", owner, repo, name) + return s.getVariable(ctx, url) +} + +// GetOrgVariable gets a single organization variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#get-an-organization-variable +func (s *ActionsService) GetOrgVariable(ctx context.Context, org, name string) (*ActionsVariable, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v", org, name) + return s.getVariable(ctx, url) +} + +// GetEnvVariable gets a single environment variable. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#get-an-environment-variable +func (s *ActionsService) GetEnvVariable(ctx context.Context, repoID int, env, variableName string) (*ActionsVariable, *Response, error) { + url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variableName) + return s.getVariable(ctx, url) +} + +func (s *ActionsService) postVariable(ctx context.Context, url string, variable *ActionsVariable) (*Response, error) { + req, err := s.client.NewRequest("POST", url, variable) + if err != nil { + return nil, err + } + return s.client.Do(ctx, req, nil) +} + +// CreateRepoVariable creates a repository variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-a-repository-variable +func (s *ActionsService) CreateRepoVariable(ctx context.Context, owner, repo string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("repos/%v/%v/actions/variables", owner, repo) + return s.postVariable(ctx, url, variable) +} + +// CreateOrgVariable creates an organization variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-an-organization-variable +func (s *ActionsService) CreateOrgVariable(ctx context.Context, org string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables", org) + return s.postVariable(ctx, url, variable) +} + +// CreateEnvVariable creates an environment variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-an-environment-variable +func (s *ActionsService) CreateEnvVariable(ctx context.Context, repoID int, env string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("repositories/%v/environments/%v/variables", repoID, env) + return s.postVariable(ctx, url, variable) +} + +func (s *ActionsService) patchVariable(ctx context.Context, url string, variable *ActionsVariable) (*Response, error) { + req, err := s.client.NewRequest("PATCH", url, variable) + if err != nil { + return nil, err + } + return s.client.Do(ctx, req, nil) +} + +// UpdateRepoVariable updates a repository variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#update-a-repository-variable +func (s *ActionsService) UpdateRepoVariable(ctx context.Context, owner, repo string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("repos/%v/%v/actions/variables/%v", owner, repo, variable.Name) + return s.patchVariable(ctx, url, variable) +} + +// UpdateOrgVariable updates an organization variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#update-an-organization-variable +func (s *ActionsService) UpdateOrgVariable(ctx context.Context, org string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v", org, variable.Name) + return s.patchVariable(ctx, url, variable) +} + +// UpdateEnvVariable updates an environment variable. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#create-an-environment-variable +func (s *ActionsService) UpdateEnvVariable(ctx context.Context, repoID int, env string, variable *ActionsVariable) (*Response, error) { + url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variable.Name) + return s.patchVariable(ctx, url, variable) +} + +func (s *ActionsService) deleteVariable(ctx context.Context, url string) (*Response, error) { + req, err := s.client.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// DeleteRepoVariable deletes a variable in a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#delete-a-repository-variable +func (s *ActionsService) DeleteRepoVariable(ctx context.Context, owner, repo, name string) (*Response, error) { + url := fmt.Sprintf("repos/%v/%v/actions/variables/%v", owner, repo, name) + return s.deleteVariable(ctx, url) +} + +// DeleteOrgVariable deletes a variable in an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#delete-an-organization-variable +func (s *ActionsService) DeleteOrgVariable(ctx context.Context, org, name string) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v", org, name) + return s.deleteVariable(ctx, url) +} + +// DeleteEnvVariable deletes a variable in an environment. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#delete-an-environment-variable +func (s *ActionsService) DeleteEnvVariable(ctx context.Context, repoID int, env, variableName string) (*Response, error) { + url := fmt.Sprintf("repositories/%v/environments/%v/variables/%v", repoID, env, variableName) + return s.deleteVariable(ctx, url) +} + +func (s *ActionsService) listSelectedReposForVariable(ctx context.Context, url string, opts *ListOptions) (*SelectedReposList, *Response, error) { + u, err := addOptions(url, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + result := new(SelectedReposList) + resp, err := s.client.Do(ctx, req, result) + if err != nil { + return nil, resp, err + } + + return result, resp, nil +} + +// ListSelectedReposForOrgVariable lists all repositories that have access to a variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#list-selected-repositories-for-an-organization-variable +func (s *ActionsService) ListSelectedReposForOrgVariable(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories", org, name) + return s.listSelectedReposForVariable(ctx, url, opts) +} + +func (s *ActionsService) setSelectedReposForVariable(ctx context.Context, url string, ids SelectedRepoIDs) (*Response, error) { + type repoIDs struct { + SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` + } + + req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// SetSelectedReposForOrgVariable sets the repositories that have access to a variable. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#set-selected-repositories-for-an-organization-variable +func (s *ActionsService) SetSelectedReposForOrgVariable(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories", org, name) + return s.setSelectedReposForVariable(ctx, url, ids) +} + +func (s *ActionsService) addSelectedRepoToVariable(ctx context.Context, url string) (*Response, error) { + req, err := s.client.NewRequest("PUT", url, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// AddSelectedRepoToOrgVariable adds a repository to an organization variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#add-selected-repository-to-an-organization-variable +func (s *ActionsService) AddSelectedRepoToOrgVariable(ctx context.Context, org, name string, repo *Repository) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories/%v", org, name, *repo.ID) + return s.addSelectedRepoToVariable(ctx, url) +} + +func (s *ActionsService) removeSelectedRepoFromVariable(ctx context.Context, url string) (*Response, error) { + req, err := s.client.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// RemoveSelectedRepoFromOrgVariable removes a repository from an organization variable. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/variables#remove-selected-repository-from-an-organization-variable +func (s *ActionsService) RemoveSelectedRepoFromOrgVariable(ctx context.Context, org, name string, repo *Repository) (*Response, error) { + url := fmt.Sprintf("orgs/%v/actions/variables/%v/repositories/%v", org, name, *repo.ID) + return s.removeSelectedRepoFromVariable(ctx, url) +} diff --git a/vendor/github.com/google/go-github/v45/github/actions_workflow_jobs.go b/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go similarity index 95% rename from vendor/github.com/google/go-github/v45/github/actions_workflow_jobs.go rename to vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go index 2867e82af0..1f018b3e48 100644 --- a/vendor/github.com/google/go-github/v45/github/actions_workflow_jobs.go +++ b/vendor/github.com/google/go-github/v53/github/actions_workflow_jobs.go @@ -28,11 +28,13 @@ type WorkflowJob struct { RunID *int64 `json:"run_id,omitempty"` RunURL *string `json:"run_url,omitempty"` NodeID *string `json:"node_id,omitempty"` + HeadBranch *string `json:"head_branch,omitempty"` HeadSHA *string `json:"head_sha,omitempty"` URL *string `json:"url,omitempty"` HTMLURL *string `json:"html_url,omitempty"` Status *string `json:"status,omitempty"` Conclusion *string `json:"conclusion,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` StartedAt *Timestamp `json:"started_at,omitempty"` CompletedAt *Timestamp `json:"completed_at,omitempty"` Name *string `json:"name,omitempty"` @@ -44,6 +46,8 @@ type WorkflowJob struct { RunnerName *string `json:"runner_name,omitempty"` RunnerGroupID *int64 `json:"runner_group_id,omitempty"` RunnerGroupName *string `json:"runner_group_name,omitempty"` + RunAttempt *int64 `json:"run_attempt,omitempty"` + WorkflowName *string `json:"workflow_name,omitempty"` } // Jobs represents a slice of repository action workflow job. 
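The actions_variables.go file added above gives ActionsService first-class repository, organization, and environment variable endpoints (ListRepoVariables, CreateRepoVariable, GetOrgVariable, and so on). As a rough illustration only, not part of the vendored patch, the sketch below shows how calling code might create and then page through repository variables with that surface; it assumes an already-authenticated *github.Client named client, and the package, function, owner, repo, and variable names are placeholders.

package actionsvariablesdemo // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

// listAfterCreate creates one repository variable and then pages through all
// variables on the repository, using the endpoints added in the vendored diff.
func listAfterCreate(ctx context.Context, client *github.Client, owner, repo string) error {
	// Create a plain repository variable (POST /repos/{owner}/{repo}/actions/variables).
	v := &github.ActionsVariable{Name: "DEPLOY_REGION", Value: "us-east-1"} // placeholder name/value
	if _, err := client.Actions.CreateRepoVariable(ctx, owner, repo, v); err != nil {
		return err
	}

	// Page through every variable defined on the repository.
	opts := &github.ListOptions{PerPage: 50}
	for {
		vars, resp, err := client.Actions.ListRepoVariables(ctx, owner, repo, opts)
		if err != nil {
			return err
		}
		for _, rv := range vars.Variables {
			fmt.Printf("%s=%s\n", rv.Name, rv.Value)
		}
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
	return nil
}

Organization and environment variables follow the same shape through ListOrgVariables and ListEnvVariables, differing only in the path parameters visible in the diff.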
diff --git a/vendor/github.com/google/go-github/v45/github/actions_workflow_runs.go b/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go similarity index 80% rename from vendor/github.com/google/go-github/v45/github/actions_workflow_runs.go rename to vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go index 18fdd57d6a..61f736be4f 100644 --- a/vendor/github.com/google/go-github/v45/github/actions_workflow_runs.go +++ b/vendor/github.com/google/go-github/v53/github/actions_workflow_runs.go @@ -22,6 +22,7 @@ type WorkflowRun struct { RunNumber *int `json:"run_number,omitempty"` RunAttempt *int `json:"run_attempt,omitempty"` Event *string `json:"event,omitempty"` + DisplayTitle *string `json:"display_title,omitempty"` Status *string `json:"status,omitempty"` Conclusion *string `json:"conclusion,omitempty"` WorkflowID *int64 `json:"workflow_id,omitempty"` @@ -55,26 +56,26 @@ type WorkflowRuns struct { // ListWorkflowRunsOptions specifies optional parameters to ListWorkflowRuns. type ListWorkflowRunsOptions struct { - Actor string `url:"actor,omitempty"` - Branch string `url:"branch,omitempty"` - Event string `url:"event,omitempty"` - Status string `url:"status,omitempty"` - Created string `url:"created,omitempty"` + Actor string `url:"actor,omitempty"` + Branch string `url:"branch,omitempty"` + Event string `url:"event,omitempty"` + Status string `url:"status,omitempty"` + Created string `url:"created,omitempty"` + HeadSHA string `url:"head_sha,omitempty"` + ExcludePullRequests bool `url:"exclude_pull_requests,omitempty"` + CheckSuiteID int64 `url:"check_suite_id,omitempty"` ListOptions } // WorkflowRunUsage represents a usage of a specific workflow run. type WorkflowRunUsage struct { - Billable *WorkflowRunEnvironment `json:"billable,omitempty"` - RunDurationMS *int64 `json:"run_duration_ms,omitempty"` + Billable *WorkflowRunBillMap `json:"billable,omitempty"` + RunDurationMS *int64 `json:"run_duration_ms,omitempty"` } -// WorkflowRunEnvironment represents different runner environments available for a workflow run. -type WorkflowRunEnvironment struct { - Ubuntu *WorkflowRunBill `json:"UBUNTU,omitempty"` - MacOS *WorkflowRunBill `json:"MACOS,omitempty"` - Windows *WorkflowRunBill `json:"WINDOWS,omitempty"` -} +// WorkflowRunBillMap represents different runner environments available for a workflow run. +// Its key is the name of its environment, e.g. "UBUNTU", "MACOS", "WINDOWS", etc. +type WorkflowRunBillMap map[string]*WorkflowRunBill // WorkflowRunBill specifies billable time for a specific environment in a workflow run. type WorkflowRunBill struct { @@ -94,6 +95,14 @@ type WorkflowRunAttemptOptions struct { ExcludePullRequests *bool `url:"exclude_pull_requests,omitempty"` } +// PendingDeploymentsRequest specifies body parameters to PendingDeployments. +type PendingDeploymentsRequest struct { + EnvironmentIDs []int64 `json:"environment_ids"` + // State can be one of: "approved", "rejected". + State string `json:"state"` + Comment string `json:"comment"` +} + func (s *ActionsService) listWorkflowRuns(ctx context.Context, endpoint string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) { u, err := addOptions(endpoint, opts) if err != nil { @@ -198,6 +207,26 @@ func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo return run, resp, nil } +// GetWorkflowRunAttemptLogs gets a redirect URL to download a plain text file of logs for a workflow run for attempt number. 
+// +// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-attempt-logs +func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, repo string, runID int64, attemptNumber int, followRedirects bool) (*url.URL, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v/logs", owner, repo, runID, attemptNumber) + + resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusFound { + return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) + } + + parsedURL, err := url.Parse(resp.Header.Get("Location")) + return parsedURL, newResponse(resp), err +} + // RerunWorkflowByID re-runs a workflow by ID. // // GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-workflow @@ -321,3 +350,23 @@ func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, rep return workflowRunUsage, resp, nil } + +// PendingDeployments approve or reject pending deployments that are waiting on approval by a required reviewer. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#review-pending-deployments-for-a-workflow-run +func (s *ActionsService) PendingDeployments(ctx context.Context, owner, repo string, runID int64, request *PendingDeploymentsRequest) ([]*Deployment, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/pending_deployments", owner, repo, runID) + + req, err := s.client.NewRequest("POST", u, request) + if err != nil { + return nil, nil, err + } + + var deployments []*Deployment + resp, err := s.client.Do(ctx, req, &deployments) + if err != nil { + return nil, resp, err + } + + return deployments, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/actions_workflows.go b/vendor/github.com/google/go-github/v53/github/actions_workflows.go similarity index 96% rename from vendor/github.com/google/go-github/v45/github/actions_workflows.go rename to vendor/github.com/google/go-github/v53/github/actions_workflows.go index 9973a5d3f3..c9b47ed4be 100644 --- a/vendor/github.com/google/go-github/v45/github/actions_workflows.go +++ b/vendor/github.com/google/go-github/v53/github/actions_workflows.go @@ -32,15 +32,12 @@ type Workflows struct { // WorkflowUsage represents a usage of a specific workflow. type WorkflowUsage struct { - Billable *WorkflowEnvironment `json:"billable,omitempty"` + Billable *WorkflowBillMap `json:"billable,omitempty"` } -// WorkflowEnvironment represents different runner environments available for a workflow. -type WorkflowEnvironment struct { - Ubuntu *WorkflowBill `json:"UBUNTU,omitempty"` - MacOS *WorkflowBill `json:"MACOS,omitempty"` - Windows *WorkflowBill `json:"WINDOWS,omitempty"` -} +// WorkflowBillMap represents different runner environments available for a workflow. +// Its key is the name of its environment, e.g. "UBUNTU", "MACOS", "WINDOWS", etc. +type WorkflowBillMap map[string]*WorkflowBill // WorkflowBill specifies billable time for a specific environment in a workflow. 
type WorkflowBill struct { diff --git a/vendor/github.com/google/go-github/v45/github/activity.go b/vendor/github.com/google/go-github/v53/github/activity.go similarity index 83% rename from vendor/github.com/google/go-github/v45/github/activity.go rename to vendor/github.com/google/go-github/v53/github/activity.go index f99ecfcdff..9cd9f9b71d 100644 --- a/vendor/github.com/google/go-github/v45/github/activity.go +++ b/vendor/github.com/google/go-github/v53/github/activity.go @@ -45,14 +45,15 @@ type FeedLinks struct { // ListFeeds lists all the feeds available to the authenticated user. // // GitHub provides several timeline resources in Atom format: -// Timeline: The GitHub global public timeline -// User: The public timeline for any user, using URI template -// Current user public: The public timeline for the authenticated user -// Current user: The private timeline for the authenticated user -// Current user actor: The private timeline for activity created by the -// authenticated user -// Current user organizations: The private timeline for the organizations -// the authenticated user is a member of. +// +// Timeline: The GitHub global public timeline +// User: The public timeline for any user, using URI template +// Current user public: The public timeline for the authenticated user +// Current user: The private timeline for the authenticated user +// Current user actor: The private timeline for activity created by the +// authenticated user +// Current user organizations: The private timeline for the organizations +// the authenticated user is a member of. // // Note: Private feeds are only returned when authenticating via Basic Auth // since current feed URIs use the older, non revocable auth tokens. diff --git a/vendor/github.com/google/go-github/v45/github/activity_events.go b/vendor/github.com/google/go-github/v53/github/activity_events.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/activity_events.go rename to vendor/github.com/google/go-github/v53/github/activity_events.go diff --git a/vendor/github.com/google/go-github/v45/github/activity_notifications.go b/vendor/github.com/google/go-github/v53/github/activity_notifications.go similarity index 96% rename from vendor/github.com/google/go-github/v45/github/activity_notifications.go rename to vendor/github.com/google/go-github/v53/github/activity_notifications.go index 38a3184536..03476c2e2c 100644 --- a/vendor/github.com/google/go-github/v45/github/activity_notifications.go +++ b/vendor/github.com/google/go-github/v53/github/activity_notifications.go @@ -23,8 +23,8 @@ type Notification struct { Reason *string `json:"reason,omitempty"` Unread *bool `json:"unread,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - LastReadAt *time.Time `json:"last_read_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + LastReadAt *Timestamp `json:"last_read_at,omitempty"` URL *string `json:"url,omitempty"` } @@ -97,13 +97,13 @@ func (s *ActivityService) ListRepositoryNotifications(ctx context.Context, owner } type markReadOptions struct { - LastReadAt time.Time `json:"last_read_at,omitempty"` + LastReadAt Timestamp `json:"last_read_at,omitempty"` } // MarkNotificationsRead marks all notifications up to lastRead as read. 
// // GitHub API docs: https://docs.github.com/en/rest/activity#mark-as-read -func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead time.Time) (*Response, error) { +func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead Timestamp) (*Response, error) { opts := &markReadOptions{ LastReadAt: lastRead, } @@ -119,7 +119,7 @@ func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead ti // the specified repository as read. // // GitHub API docs: https://docs.github.com/en/rest/activity/notifications#mark-repository-notifications-as-read -func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead time.Time) (*Response, error) { +func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead Timestamp) (*Response, error) { opts := &markReadOptions{ LastReadAt: lastRead, } diff --git a/vendor/github.com/google/go-github/v45/github/activity_star.go b/vendor/github.com/google/go-github/v53/github/activity_star.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/activity_star.go rename to vendor/github.com/google/go-github/v53/github/activity_star.go diff --git a/vendor/github.com/google/go-github/v45/github/activity_watching.go b/vendor/github.com/google/go-github/v53/github/activity_watching.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/activity_watching.go rename to vendor/github.com/google/go-github/v53/github/activity_watching.go diff --git a/vendor/github.com/google/go-github/v45/github/admin.go b/vendor/github.com/google/go-github/v53/github/admin.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/admin.go rename to vendor/github.com/google/go-github/v53/github/admin.go diff --git a/vendor/github.com/google/go-github/v45/github/admin_orgs.go b/vendor/github.com/google/go-github/v53/github/admin_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/admin_orgs.go rename to vendor/github.com/google/go-github/v53/github/admin_orgs.go diff --git a/vendor/github.com/google/go-github/v45/github/admin_stats.go b/vendor/github.com/google/go-github/v53/github/admin_stats.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/admin_stats.go rename to vendor/github.com/google/go-github/v53/github/admin_stats.go diff --git a/vendor/github.com/google/go-github/v45/github/admin_users.go b/vendor/github.com/google/go-github/v53/github/admin_users.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/admin_users.go rename to vendor/github.com/google/go-github/v53/github/admin_users.go diff --git a/vendor/github.com/google/go-github/v45/github/apps.go b/vendor/github.com/google/go-github/v53/github/apps.go similarity index 90% rename from vendor/github.com/google/go-github/v45/github/apps.go rename to vendor/github.com/google/go-github/v53/github/apps.go index dff9b210f2..ab83d59ab2 100644 --- a/vendor/github.com/google/go-github/v45/github/apps.go +++ b/vendor/github.com/google/go-github/v53/github/apps.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // AppsService provides access to the installation related functions @@ -19,24 +18,25 @@ type AppsService service // App represents a GitHub App. 
type App struct { - ID *int64 `json:"id,omitempty"` - Slug *string `json:"slug,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - ExternalURL *string `json:"external_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Permissions *InstallationPermissions `json:"permissions,omitempty"` - Events []string `json:"events,omitempty"` + ID *int64 `json:"id,omitempty"` + Slug *string `json:"slug,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Owner *User `json:"owner,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + ExternalURL *string `json:"external_url,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + Permissions *InstallationPermissions `json:"permissions,omitempty"` + Events []string `json:"events,omitempty"` + InstallationsCount *int `json:"installations_count,omitempty"` } // InstallationToken represents an installation token. type InstallationToken struct { Token *string `json:"token,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` + ExpiresAt *Timestamp `json:"expires_at,omitempty"` Permissions *InstallationPermissions `json:"permissions,omitempty"` Repositories []*Repository `json:"repositories,omitempty"` } @@ -59,8 +59,9 @@ type InstallationTokenOptions struct { // InstallationPermissions lists the repository and organization permissions for an installation. // // Permission names taken from: -// https://docs.github.com/en/enterprise-server@3.0/rest/apps#create-an-installation-access-token-for-an-app -// https://docs.github.com/en/rest/apps#create-an-installation-access-token-for-an-app +// +// https://docs.github.com/en/enterprise-server@3.0/rest/apps#create-an-installation-access-token-for-an-app +// https://docs.github.com/en/rest/apps#create-an-installation-access-token-for-an-app type InstallationPermissions struct { Actions *string `json:"actions,omitempty"` Administration *string `json:"administration,omitempty"` @@ -76,7 +77,9 @@ type InstallationPermissions struct { Metadata *string `json:"metadata,omitempty"` Members *string `json:"members,omitempty"` OrganizationAdministration *string `json:"organization_administration,omitempty"` + OrganizationCustomRoles *string `json:"organization_custom_roles,omitempty"` OrganizationHooks *string `json:"organization_hooks,omitempty"` + OrganizationPackages *string `json:"organization_packages,omitempty"` OrganizationPlan *string `json:"organization_plan,omitempty"` OrganizationPreReceiveHooks *string `json:"organization_pre_receive_hooks,omitempty"` OrganizationProjects *string `json:"organization_projects,omitempty"` diff --git a/vendor/github.com/google/go-github/v45/github/apps_hooks.go b/vendor/github.com/google/go-github/v53/github/apps_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/apps_hooks.go rename to vendor/github.com/google/go-github/v53/github/apps_hooks.go diff --git a/vendor/github.com/google/go-github/v45/github/apps_hooks_deliveries.go b/vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/apps_hooks_deliveries.go rename to 
vendor/github.com/google/go-github/v53/github/apps_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v45/github/apps_installation.go b/vendor/github.com/google/go-github/v53/github/apps_installation.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/apps_installation.go rename to vendor/github.com/google/go-github/v53/github/apps_installation.go diff --git a/vendor/github.com/google/go-github/v45/github/apps_manifest.go b/vendor/github.com/google/go-github/v53/github/apps_manifest.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/apps_manifest.go rename to vendor/github.com/google/go-github/v53/github/apps_manifest.go diff --git a/vendor/github.com/google/go-github/v45/github/apps_marketplace.go b/vendor/github.com/google/go-github/v53/github/apps_marketplace.go similarity index 91% rename from vendor/github.com/google/go-github/v45/github/apps_marketplace.go rename to vendor/github.com/google/go-github/v53/github/apps_marketplace.go index 8253013684..32889abd24 100644 --- a/vendor/github.com/google/go-github/v45/github/apps_marketplace.go +++ b/vendor/github.com/google/go-github/v53/github/apps_marketplace.go @@ -46,6 +46,7 @@ type MarketplacePlan struct { // MarketplacePurchase represents a GitHub Apps Marketplace Purchase. type MarketplacePurchase struct { + Account *MarketplacePurchaseAccount `json:"account,omitempty"` // BillingCycle can be one of the values "yearly", "monthly" or nil. BillingCycle *string `json:"billing_cycle,omitempty"` NextBillingDate *Timestamp `json:"next_billing_date,omitempty"` @@ -75,6 +76,17 @@ type MarketplacePlanAccount struct { MarketplacePendingChange *MarketplacePendingChange `json:"marketplace_pending_change,omitempty"` } +// MarketplacePurchaseAccount represents a GitHub Account (user or organization) for a Purchase. +type MarketplacePurchaseAccount struct { + URL *string `json:"url,omitempty"` + Type *string `json:"type,omitempty"` + ID *int64 `json:"id,omitempty"` + Login *string `json:"login,omitempty"` + OrganizationBillingEmail *string `json:"organization_billing_email,omitempty"` + Email *string `json:"email,omitempty"` + NodeID *string `json:"node_id,omitempty"` +} + // ListPlans lists all plans for your Marketplace listing. // // GitHub API docs: https://docs.github.com/en/rest/apps#list-plans diff --git a/vendor/github.com/google/go-github/v45/github/authorizations.go b/vendor/github.com/google/go-github/v53/github/authorizations.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/authorizations.go rename to vendor/github.com/google/go-github/v53/github/authorizations.go diff --git a/vendor/github.com/google/go-github/v45/github/billing.go b/vendor/github.com/google/go-github/v53/github/billing.go similarity index 87% rename from vendor/github.com/google/go-github/v45/github/billing.go rename to vendor/github.com/google/go-github/v53/github/billing.go index d516cd0c29..7a76bf86fd 100644 --- a/vendor/github.com/google/go-github/v45/github/billing.go +++ b/vendor/github.com/google/go-github/v53/github/billing.go @@ -18,30 +18,27 @@ type BillingService service // ActionBilling represents a GitHub Action billing. 
type ActionBilling struct { - TotalMinutesUsed int `json:"total_minutes_used"` + TotalMinutesUsed float64 `json:"total_minutes_used"` TotalPaidMinutesUsed float64 `json:"total_paid_minutes_used"` - IncludedMinutes int `json:"included_minutes"` + IncludedMinutes float64 `json:"included_minutes"` MinutesUsedBreakdown MinutesUsedBreakdown `json:"minutes_used_breakdown"` } -type MinutesUsedBreakdown struct { - Ubuntu int `json:"UBUNTU"` - MacOS int `json:"MACOS"` - Windows int `json:"WINDOWS"` -} +// MinutesUsedBreakdown counts the actions minutes used by machine type (e.g. UBUNTU, WINDOWS, MACOS). +type MinutesUsedBreakdown = map[string]int // PackageBilling represents a GitHub Package billing. type PackageBilling struct { - TotalGigabytesBandwidthUsed int `json:"total_gigabytes_bandwidth_used"` - TotalPaidGigabytesBandwidthUsed int `json:"total_paid_gigabytes_bandwidth_used"` - IncludedGigabytesBandwidth int `json:"included_gigabytes_bandwidth"` + TotalGigabytesBandwidthUsed int `json:"total_gigabytes_bandwidth_used"` + TotalPaidGigabytesBandwidthUsed int `json:"total_paid_gigabytes_bandwidth_used"` + IncludedGigabytesBandwidth float64 `json:"included_gigabytes_bandwidth"` } // StorageBilling represents a GitHub Storage billing. type StorageBilling struct { DaysLeftInBillingCycle int `json:"days_left_in_billing_cycle"` EstimatedPaidStorageForMonth float64 `json:"estimated_paid_storage_for_month"` - EstimatedStorageForMonth int `json:"estimated_storage_for_month"` + EstimatedStorageForMonth float64 `json:"estimated_storage_for_month"` } // ActiveCommitters represents the total active committers across all repositories in an Organization. @@ -123,9 +120,14 @@ func (s *BillingService) GetStorageBillingOrg(ctx context.Context, org string) ( // GetAdvancedSecurityActiveCommittersOrg returns the GitHub Advanced Security active committers for an organization per repository. 
// -// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-advanced-security-active-committers-for-an-organization -func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Context, org string) (*ActiveCommitters, *Response, error) { +// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/billing?apiVersion=2022-11-28#get-github-advanced-security-active-committers-for-an-organization +func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Context, org string, opts *ListOptions) (*ActiveCommitters, *Response, error) { u := fmt.Sprintf("orgs/%v/settings/billing/advanced-security", org) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err diff --git a/vendor/github.com/google/go-github/v45/github/checks.go b/vendor/github.com/google/go-github/v53/github/checks.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/checks.go rename to vendor/github.com/google/go-github/v53/github/checks.go diff --git a/vendor/github.com/google/go-github/v45/github/code-scanning.go b/vendor/github.com/google/go-github/v53/github/code-scanning.go similarity index 86% rename from vendor/github.com/google/go-github/v45/github/code-scanning.go rename to vendor/github.com/google/go-github/v53/github/code-scanning.go index df8ed86b51..6717348ed7 100644 --- a/vendor/github.com/google/go-github/v45/github/code-scanning.go +++ b/vendor/github.com/google/go-github/v53/github/code-scanning.go @@ -87,6 +87,7 @@ type Alert struct { DismissedBy *User `json:"dismissed_by,omitempty"` DismissedAt *Timestamp `json:"dismissed_at,omitempty"` DismissedReason *string `json:"dismissed_reason,omitempty"` + DismissedComment *string `json:"dismissed_comment,omitempty"` InstancesURL *string `json:"instances_url,omitempty"` } @@ -121,6 +122,10 @@ type AlertListOptions struct { // Return code scanning alerts for a specific branch reference. The ref must be formatted as heads/. Ref string `url:"ref,omitempty"` + ListCursorOptions + + // Add ListOptions so offset pagination with integer type "page" query parameter is accepted + // since ListCursorOptions accepts "page" as string only. ListOptions } @@ -168,6 +173,22 @@ type SarifAnalysis struct { ToolName *string `json:"tool_name,omitempty"` } +// CodeScanningAlertState specifies the state of a code scanning alert. +// +// GitHub API docs: https://docs.github.com/en/rest/code-scanning +type CodeScanningAlertState struct { + // State sets the state of the code scanning alert and is a required field. + // You must also provide DismissedReason when you set the state to "dismissed". + // State can be one of: "open", "dismissed". + State string `json:"state"` + // DismissedReason represents the reason for dismissing or closing the alert. + // It is required when the state is "dismissed". + // It can be one of: "false positive", "won't fix", "used in tests". + DismissedReason *string `json:"dismissed_reason,omitempty"` + // DismissedComment is associated with the dismissal of the alert. + DismissedComment *string `json:"dismissed_comment,omitempty"` +} + // SarifID identifies a sarif analysis upload. // // GitHub API docs: https://docs.github.com/en/rest/code-scanning @@ -256,6 +277,31 @@ func (s *CodeScanningService) GetAlert(ctx context.Context, owner, repo string, return a, resp, nil } +// UpdateAlert updates the state of a single code scanning alert for a repository. 
+// +// You must use an access token with the security_events scope to use this endpoint. +// GitHub Apps must have the security_events read permission to use this endpoint. +// +// The security alert_id is the number at the end of the security alert's URL. +// +// GitHub API docs: https://docs.github.com/en/rest/code-scanning?apiVersion=2022-11-28#update-a-code-scanning-alert +func (s *CodeScanningService) UpdateAlert(ctx context.Context, owner, repo string, id int64, stateInfo *CodeScanningAlertState) (*Alert, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/code-scanning/alerts/%v", owner, repo, id) + + req, err := s.client.NewRequest("PATCH", u, stateInfo) + if err != nil { + return nil, nil, err + } + + a := new(Alert) + resp, err := s.client.Do(ctx, req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, nil +} + // UploadSarif uploads the result of code scanning job to GitHub. // // For the parameter sarif, you must first compress your SARIF file using gzip and then translate the contents of the file into a Base64 encoding string. diff --git a/vendor/github.com/google/go-github/v45/github/dependabot.go b/vendor/github.com/google/go-github/v53/github/dependabot.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/dependabot.go rename to vendor/github.com/google/go-github/v53/github/dependabot.go diff --git a/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go b/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go new file mode 100644 index 0000000000..7b5d53b393 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/dependabot_alerts.go @@ -0,0 +1,135 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// Dependency reprensents the vulnerable dependency. +type Dependency struct { + Package *VulnerabilityPackage `json:"package,omitempty"` + ManifestPath *string `json:"manifest_path,omitempty"` + Scope *string `json:"scope,omitempty"` +} + +// AdvisoryCVSs represents the advisory pertaining to the Common Vulnerability Scoring System. +type AdvisoryCVSs struct { + Score *float64 `json:"score,omitempty"` + VectorString *string `json:"vector_string,omitempty"` +} + +// AdvisoryCWEs reprensent the advisory pertaining to Common Weakness Enumeration. +type AdvisoryCWEs struct { + CWEID *string `json:"cwe_id,omitempty"` + Name *string `json:"name,omitempty"` +} + +// DependabotSecurityAdvisory represents the GitHub Security Advisory. +type DependabotSecurityAdvisory struct { + GHSAID *string `json:"ghsa_id,omitempty"` + CVEID *string `json:"cve_id,omitempty"` + Summary *string `json:"summary,omitempty"` + Description *string `json:"description,omitempty"` + Vulnerabilities []*AdvisoryVulnerability `json:"vulnerabilities,omitempty"` + Severity *string `json:"severity,omitempty"` + CVSs *AdvisoryCVSs `json:"cvss,omitempty"` + CWEs []*AdvisoryCWEs `json:"cwes,omitempty"` + Identifiers []*AdvisoryIdentifier `json:"identifiers,omitempty"` + References []*AdvisoryReference `json:"references,omitempty"` + PublishedAt *Timestamp `json:"published_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + WithdrawnAt *Timestamp `json:"withdrawn_at,omitempty"` +} + +// DependabotAlert represents a Dependabot alert. 
+type DependabotAlert struct { + Number *int `json:"number,omitempty"` + State *string `json:"state,omitempty"` + Dependency *Dependency `json:"dependency,omitempty"` + SecurityAdvisory *DependabotSecurityAdvisory `json:"security_advisory,omitempty"` + SecurityVulnerability *AdvisoryVulnerability `json:"security_vulnerability,omitempty"` + URL *string `json:"url,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + DismissedAt *Timestamp `json:"dismissed_at,omitempty"` + DismissedBy *User `json:"dismissed_by,omitempty"` + DismissedReason *string `json:"dismissed_reason,omitempty"` + DismissedComment *string `json:"dismissed_comment,omitempty"` + FixedAt *Timestamp `json:"fixed_at,omitempty"` + Repository *Repository `json:"repository,omitempty"` +} + +// ListAlertsOptions specifies the optional parameters to the DependabotService.ListRepoAlerts +// and DependabotService.ListOrgAlerts methods. +type ListAlertsOptions struct { + State *string `url:"state,omitempty"` + Severity *string `url:"severity,omitempty"` + Ecosystem *string `url:"ecosystem,omitempty"` + Package *string `url:"package,omitempty"` + Scope *string `url:"scope,omitempty"` + Sort *string `url:"sort,omitempty"` + Direction *string `url:"direction,omitempty"` + + ListCursorOptions +} + +func (s *DependabotService) listAlerts(ctx context.Context, url string, opts *ListAlertsOptions) ([]*DependabotAlert, *Response, error) { + u, err := addOptions(url, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var alerts []*DependabotAlert + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, err + } + + return alerts, resp, nil +} + +// ListRepoAlerts lists all Dependabot alerts of a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/dependabot/alerts#list-dependabot-alerts-for-a-repository +func (s *DependabotService) ListRepoAlerts(ctx context.Context, owner, repo string, opts *ListAlertsOptions) ([]*DependabotAlert, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/dependabot/alerts", owner, repo) + return s.listAlerts(ctx, url, opts) +} + +// ListOrgAlerts lists all Dependabot alerts of an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/dependabot/alerts#list-dependabot-alerts-for-an-organization +func (s *DependabotService) ListOrgAlerts(ctx context.Context, org string, opts *ListAlertsOptions) ([]*DependabotAlert, *Response, error) { + url := fmt.Sprintf("orgs/%v/dependabot/alerts", org) + return s.listAlerts(ctx, url, opts) +} + +// GetRepoAlert gets a single repository Dependabot alert. 
+// +// GitHub API docs: https://docs.github.com/en/rest/dependabot/alerts#get-a-dependabot-alert +func (s *DependabotService) GetRepoAlert(ctx context.Context, owner, repo string, number int) (*DependabotAlert, *Response, error) { + url := fmt.Sprintf("repos/%v/%v/dependabot/alerts/%v", owner, repo, number) + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + alert := new(DependabotAlert) + resp, err := s.client.Do(ctx, req, alert) + if err != nil { + return nil, resp, err + } + + return alert, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/dependabot_secrets.go b/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go similarity index 86% rename from vendor/github.com/google/go-github/v45/github/dependabot_secrets.go rename to vendor/github.com/google/go-github/v53/github/dependabot_secrets.go index f51f3396bd..f87ab42c39 100644 --- a/vendor/github.com/google/go-github/v45/github/dependabot_secrets.go +++ b/vendor/github.com/google/go-github/v53/github/dependabot_secrets.go @@ -110,7 +110,20 @@ func (s *DependabotService) GetOrgSecret(ctx context.Context, org, name string) return s.getSecret(ctx, url) } -func (s *DependabotService) putSecret(ctx context.Context, url string, eSecret *EncryptedSecret) (*Response, error) { +// DependabotEncryptedSecret represents a secret that is encrypted using a public key for Dependabot. +// +// The value of EncryptedValue must be your secret, encrypted with +// LibSodium (see documentation here: https://libsodium.gitbook.io/doc/bindings_for_other_languages) +// using the public key retrieved using the GetPublicKey method. +type DependabotEncryptedSecret struct { + Name string `json:"-"` + KeyID string `json:"key_id"` + EncryptedValue string `json:"encrypted_value"` + Visibility string `json:"visibility,omitempty"` + SelectedRepositoryIDs DependabotSecretsSelectedRepoIDs `json:"selected_repository_ids,omitempty"` +} + +func (s *DependabotService) putSecret(ctx context.Context, url string, eSecret *DependabotEncryptedSecret) (*Response, error) { req, err := s.client.NewRequest("PUT", url, eSecret) if err != nil { return nil, err @@ -122,7 +135,7 @@ func (s *DependabotService) putSecret(ctx context.Context, url string, eSecret * // CreateOrUpdateRepoSecret creates or updates a repository Dependabot secret with an encrypted value. // // GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#create-or-update-a-repository-secret -func (s *DependabotService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) { +func (s *DependabotService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *DependabotEncryptedSecret) (*Response, error) { url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, eSecret.Name) return s.putSecret(ctx, url, eSecret) } @@ -130,7 +143,7 @@ func (s *DependabotService) CreateOrUpdateRepoSecret(ctx context.Context, owner, // CreateOrUpdateOrgSecret creates or updates an organization Dependabot secret with an encrypted value. 
// // GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#create-or-update-an-organization-secret -func (s *DependabotService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) { +func (s *DependabotService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *DependabotEncryptedSecret) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, eSecret.Name) return s.putSecret(ctx, url, eSecret) } @@ -184,13 +197,16 @@ func (s *DependabotService) ListSelectedReposForOrgSecret(ctx context.Context, o return result, resp, nil } +// DependabotSecretsSelectedRepoIDs are the repository IDs that have access to the dependabot secrets. +type DependabotSecretsSelectedRepoIDs []int64 + // SetSelectedReposForOrgSecret sets the repositories that have access to a Dependabot secret. // // GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#set-selected-repositories-for-an-organization-secret -func (s *DependabotService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { +func (s *DependabotService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids DependabotSecretsSelectedRepoIDs) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories", org, name) type repoIDs struct { - SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` + SelectedIDs DependabotSecretsSelectedRepoIDs `json:"selected_repository_ids"` } req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) diff --git a/vendor/github.com/google/go-github/v45/github/doc.go b/vendor/github.com/google/go-github/v53/github/doc.go similarity index 97% rename from vendor/github.com/google/go-github/v45/github/doc.go rename to vendor/github.com/google/go-github/v53/github/doc.go index 38cda12b2b..af2847111a 100644 --- a/vendor/github.com/google/go-github/v45/github/doc.go +++ b/vendor/github.com/google/go-github/v53/github/doc.go @@ -8,7 +8,7 @@ Package github provides a client for using the GitHub API. Usage: - import "github.com/google/go-github/v45/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) + import "github.com/google/go-github/v53/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) import "github.com/google/go-github/github" // with go modules disabled Construct a new GitHub client, then use the various services on the client to @@ -38,7 +38,7 @@ can be used as a starting point. For more sample code snippets, head over to the https://github.com/google/go-github/tree/master/example directory. -Authentication +# Authentication The go-github library does not directly handle authentication. Instead, when creating a new client, pass an http.Client that can handle authentication for @@ -110,7 +110,7 @@ To authenticate as an app, using a JWT: // Use client... } -Rate Limiting +# Rate Limiting GitHub imposes a rate limit on all API clients. Unauthenticated clients are limited to 60 requests per hour, while authenticated clients can make up to @@ -139,7 +139,7 @@ For secondary rate limits, you can check if its type is *github.AbuseRateLimitEr Learn more about GitHub rate limiting at https://docs.github.com/en/rest/rate-limit . 
-Accepted Status +# Accepted Status Some endpoints may return a 202 Accepted status code, meaning that the information required is not yet ready and was scheduled to be gathered on @@ -154,7 +154,7 @@ To detect this condition of error, you can check if its type is log.Println("scheduled on GitHub side") } -Conditional Requests +# Conditional Requests The GitHub API has good support for conditional requests which will help prevent you from burning through your rate limit, as well as help speed up your @@ -165,7 +165,7 @@ https://github.com/gregjones/httpcache for that. Learn more about GitHub conditional requests at https://docs.github.com/en/rest/overview/resources-in-the-rest-api#conditional-requests. -Creating and Updating Resources +# Creating and Updating Resources All structs for GitHub resources use pointer values for all non-repeated fields. This allows distinguishing between unset fields and those set to a zero-value. @@ -181,7 +181,7 @@ bool, and int values. For example: Users who have worked with protocol buffers should find this pattern familiar. -Pagination +# Pagination All requests for resource collections (repos, pull requests, issues, etc.) support pagination. Pagination options are described in the @@ -208,6 +208,5 @@ github.Response struct. } opt.Page = resp.NextPage } - */ package github diff --git a/vendor/github.com/google/go-github/v45/github/enterprise.go b/vendor/github.com/google/go-github/v53/github/enterprise.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/enterprise.go rename to vendor/github.com/google/go-github/v53/github/enterprise.go diff --git a/vendor/github.com/google/go-github/v45/github/enterprise_actions_runners.go b/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go similarity index 75% rename from vendor/github.com/google/go-github/v45/github/enterprise_actions_runners.go rename to vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go index f2ba166360..daafc5e628 100644 --- a/vendor/github.com/google/go-github/v45/github/enterprise_actions_runners.go +++ b/vendor/github.com/google/go-github/v53/github/enterprise_actions_runners.go @@ -10,6 +10,25 @@ import ( "fmt" ) +// ListRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-an-enterprise +func (s *EnterpriseService) ListRunnerApplicationDownloads(ctx context.Context, enterprise string) ([]*RunnerApplicationDownload, *Response, error) { + u := fmt.Sprintf("enterprises/%v/actions/runners/downloads", enterprise) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var rads []*RunnerApplicationDownload + resp, err := s.client.Do(ctx, req, &rads) + if err != nil { + return nil, resp, err + } + + return rads, resp, nil +} + // CreateRegistrationToken creates a token that can be used to add a self-hosted runner. 
// // GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-an-enterprise diff --git a/vendor/github.com/google/go-github/v45/github/enterprise_audit_log.go b/vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/enterprise_audit_log.go rename to vendor/github.com/google/go-github/v53/github/enterprise_audit_log.go diff --git a/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go b/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go new file mode 100644 index 0000000000..3980a86aa4 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/enterprise_code_security_and_analysis.go @@ -0,0 +1,78 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// EnterpriseSecurityAnalysisSettings represents security analysis settings for an enterprise. +type EnterpriseSecurityAnalysisSettings struct { + AdvancedSecurityEnabledForNewRepositories *bool `json:"advanced_security_enabled_for_new_repositories,omitempty"` + SecretScanningEnabledForNewRepositories *bool `json:"secret_scanning_enabled_for_new_repositories,omitempty"` + SecretScanningPushProtectionEnabledForNewRepositories *bool `json:"secret_scanning_push_protection_enabled_for_new_repositories,omitempty"` + SecretScanningPushProtectionCustomLink *string `json:"secret_scanning_push_protection_custom_link,omitempty"` +} + +// GetCodeSecurityAndAnalysis gets code security and analysis features for an enterprise. +// +// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/code-security-and-analysis?apiVersion=2022-11-28#get-code-security-and-analysis-features-for-an-enterprise +func (s *EnterpriseService) GetCodeSecurityAndAnalysis(ctx context.Context, enterprise string) (*EnterpriseSecurityAnalysisSettings, *Response, error) { + u := fmt.Sprintf("enterprises/%v/code_security_and_analysis", enterprise) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + settings := new(EnterpriseSecurityAnalysisSettings) + resp, err := s.client.Do(ctx, req, settings) + if err != nil { + return nil, resp, err + } + + return settings, resp, nil +} + +// UpdateCodeSecurityAndAnalysis updates code security and analysis features for new repositories in an enterprise. +// +// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/code-security-and-analysis?apiVersion=2022-11-28#update-code-security-and-analysis-features-for-an-enterprise +func (s *EnterpriseService) UpdateCodeSecurityAndAnalysis(ctx context.Context, enterprise string, settings *EnterpriseSecurityAnalysisSettings) (*Response, error) { + u := fmt.Sprintf("enterprises/%v/code_security_and_analysis", enterprise) + req, err := s.client.NewRequest("PATCH", u, settings) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// EnableDisableSecurityFeature enables or disables a security feature for all repositories in an enterprise. +// +// Valid values for securityProduct: "advanced_security", "secret_scanning", "secret_scanning_push_protection". +// Valid values for enablement: "enable_all", "disable_all". 
+// +// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/enterprise-admin/code-security-and-analysis?apiVersion=2022-11-28#enable-or-disable-a-security-feature +func (s *EnterpriseService) EnableDisableSecurityFeature(ctx context.Context, enterprise, securityProduct, enablement string) (*Response, error) { + u := fmt.Sprintf("enterprises/%v/%v/%v", enterprise, securityProduct, enablement) + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/event.go b/vendor/github.com/google/go-github/v53/github/event.go similarity index 93% rename from vendor/github.com/google/go-github/v45/github/event.go rename to vendor/github.com/google/go-github/v53/github/event.go index 5a052de09c..20907a9932 100644 --- a/vendor/github.com/google/go-github/v45/github/event.go +++ b/vendor/github.com/google/go-github/v53/github/event.go @@ -7,7 +7,6 @@ package github import ( "encoding/json" - "time" ) // Event represents a GitHub event. @@ -18,7 +17,7 @@ type Event struct { Repo *Repository `json:"repo,omitempty"` Actor *User `json:"actor,omitempty"` Org *Organization `json:"org,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` ID *string `json:"id,omitempty"` } @@ -36,6 +35,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &CheckRunEvent{} case "CheckSuiteEvent": payload = &CheckSuiteEvent{} + case "CodeScanningAlertEvent": + payload = &CodeScanningAlertEvent{} case "CommitCommentEvent": payload = &CommitCommentEvent{} case "ContentReferenceEvent": @@ -48,10 +49,14 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &DeployKeyEvent{} case "DeploymentEvent": payload = &DeploymentEvent{} + case "DeploymentProtectionRuleEvent": + payload = &DeploymentProtectionRuleEvent{} case "DeploymentStatusEvent": payload = &DeploymentStatusEvent{} case "DiscussionEvent": payload = &DiscussionEvent{} + case "DiscussionCommentEvent": + payload = &DiscussionCommentEvent{} case "ForkEvent": payload = &ForkEvent{} case "GitHubAppAuthorizationEvent": @@ -74,6 +79,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &MemberEvent{} case "MembershipEvent": payload = &MembershipEvent{} + case "MergeGroupEvent": + payload = &MergeGroupEvent{} case "MetaEvent": payload = &MetaEvent{} case "MilestoneEvent": diff --git a/vendor/github.com/google/go-github/v45/github/event_types.go b/vendor/github.com/google/go-github/v53/github/event_types.go similarity index 92% rename from vendor/github.com/google/go-github/v45/github/event_types.go rename to vendor/github.com/google/go-github/v53/github/event_types.go index b550361848..6a13b286bd 100644 --- a/vendor/github.com/google/go-github/v45/github/event_types.go +++ b/vendor/github.com/google/go-github/v53/github/event_types.go @@ -173,6 +173,25 @@ type DeploymentEvent struct { Installation *Installation `json:"installation,omitempty"` } +// DeploymentProtectionRuleEvent represents a deployment protection rule event. +// The Webhook event name is "deployment_protection_rule". 
+// +// GitHub API docs: https://docs.github.com/webhooks-and-events/webhooks/webhook-events-and-payloads#deployment_protection_rule +type DeploymentProtectionRuleEvent struct { + Action *string `json:"action,omitempty"` + Environment *string `json:"environment,omitempty"` + Event *string `json:"event,omitempty"` + + // The URL Github provides for a third-party to use in order to pass/fail a deployment gate + DeploymentCallbackURL *string `json:"deployment_callback_url,omitempty"` + Deployment *Deployment `json:"deployment,omitempty"` + Repo *Repository `json:"repository,omitempty"` + Organization *Organization `json:"organization,omitempty"` + PullRequests []*PullRequest `json:"pull_requests,omitempty"` + Sender *User `json:"sender,omitempty"` + Installation *Installation `json:"installation,omitempty"` +} + // DeploymentStatusEvent represents a deployment status. // The Webhook event name is "deployment_status". // @@ -189,6 +208,39 @@ type DeploymentStatusEvent struct { Installation *Installation `json:"installation,omitempty"` } +// DiscussionCommentEvent represents a webhook event for a comment on discussion. +// The Webhook event name is "discussion_comment". +// +// GitHub API docs: https://docs.github.com/webhooks-and-events/webhooks/webhook-events-and-payloads#discussion_comment +type DiscussionCommentEvent struct { + // Action is the action that was performed on the comment. + // Possible values are: "created", "edited", "deleted". ** check what all can be added + Action *string `json:"action,omitempty"` + Discussion *Discussion `json:"discussion,omitempty"` + Comment *CommentDiscussion `json:"comment,omitempty"` + Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` + Installation *Installation `json:"installation,omitempty"` +} + +// CommentDiscussion represents a comment in a GitHub DiscussionCommentEvent. +type CommentDiscussion struct { + AuthorAssociation *string `json:"author_association,omitempty"` + Body *string `json:"body,omitempty"` + ChildCommentCount *int `json:"child_comment_count,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + DiscussionID *int64 `json:"discussion_id,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + ParentID *int64 `json:"parent_id,omitempty"` + Reactions *Reactions `json:"reactions,omitempty"` + RepositoryURL *string `json:"repository_url,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + User *User `json:"user,omitempty"` +} + // DiscussionEvent represents a webhook event for a discussion. // The Webhook event name is "discussion". // @@ -299,6 +351,7 @@ type EditChange struct { Body *EditBody `json:"body,omitempty"` Base *EditBase `json:"base,omitempty"` Repo *EditRepo `json:"repository,omitempty"` + Owner *EditOwner `json:"owner,omitempty"` } // EditTitle represents a pull-request title change. @@ -327,6 +380,17 @@ type EditRepo struct { Name *RepoName `json:"name,omitempty"` } +// EditOwner represents a change of repository ownership. +type EditOwner struct { + OwnerInfo *OwnerInfo `json:"from,omitempty"` +} + +// OwnerInfo represents the account info of the owner of the repo (could be User or Organization but both are User structs). +type OwnerInfo struct { + User *User `json:"user,omitempty"` + Org *User `json:"organization,omitempty"` +} + // RepoName represents a change of repository name. 
type RepoName struct { From *string `json:"from,omitempty"` @@ -424,7 +488,7 @@ type InstallationEvent struct { Repositories []*Repository `json:"repositories,omitempty"` Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` - // TODO key "requester" is not covered + Requester *User `json:"requester,omitempty"` } // InstallationRepositoriesEvent is triggered when a repository is added or @@ -485,6 +549,7 @@ type IssuesEvent struct { Repo *Repository `json:"repository,omitempty"` Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` + Milestone *Milestone `json:"milestone,omitempty"` } // LabelEvent is triggered when a repository's label is created, edited, or deleted. @@ -559,6 +624,37 @@ type MembershipEvent struct { Installation *Installation `json:"installation,omitempty"` } +// MergeGroup represents the merge group in a merge queue. +type MergeGroup struct { + // The SHA of the merge group. + HeadSHA *string `json:"head_sha,omitempty"` + // The full ref of the merge group. + HeadRef *string `json:"head_ref,omitempty"` + // The SHA of the merge group's parent commit. + BaseSHA *string `json:"base_sha,omitempty"` + // The full ref of the branch the merge group will be merged into. + BaseRef *string `json:"base_ref,omitempty"` + // An expanded representation of the head_sha commit. + HeadCommit *Commit `json:"head_commit,omitempty"` +} + +// MergeGroupEvent represents activity related to merge groups in a merge queue. The type of activity is specified +// in the action property of the payload object. +// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#merge_group +type MergeGroupEvent struct { + // The action that was performed. Currently, can only be checks_requested. + Action *string `json:"action,omitempty"` + // The merge group. + MergeGroup *MergeGroup `json:"merge_group,omitempty"` + + // The following fields are only populated by Webhook events. + Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` + Installation *Installation `json:"installation,omitempty"` + Sender *User `json:"sender,omitempty"` +} + // MetaEvent is triggered when the webhook that this event is configured on is deleted. // This event will only listen for changes to the particular hook the event is installed on. // Therefore, it must be selected for each hook that you'd like to receive meta events for. @@ -997,6 +1093,7 @@ type PushEventRepository struct { SSHURL *string `json:"ssh_url,omitempty"` CloneURL *string `json:"clone_url,omitempty"` SVNURL *string `json:"svn_url,omitempty"` + Topics []string `json:"topics,omitempty"` } // PushEventRepoOwner is a basic representation of user/org in a PushEvent payload. 
@@ -1358,4 +1455,6 @@ type CodeScanningAlertEvent struct { Repo *Repository `json:"repository,omitempty"` Org *Organization `json:"organization,omitempty"` Sender *User `json:"sender,omitempty"` + + Installation *Installation `json:"installation,omitempty"` } diff --git a/vendor/github.com/google/go-github/v45/github/gists.go b/vendor/github.com/google/go-github/v53/github/gists.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/gists.go rename to vendor/github.com/google/go-github/v53/github/gists.go index ecdc6f2726..80961fcb90 100644 --- a/vendor/github.com/google/go-github/v45/github/gists.go +++ b/vendor/github.com/google/go-github/v53/github/gists.go @@ -28,8 +28,8 @@ type Gist struct { HTMLURL *string `json:"html_url,omitempty"` GitPullURL *string `json:"git_pull_url,omitempty"` GitPushURL *string `json:"git_push_url,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` NodeID *string `json:"node_id,omitempty"` } diff --git a/vendor/github.com/google/go-github/v45/github/gists_comments.go b/vendor/github.com/google/go-github/v53/github/gists_comments.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/gists_comments.go rename to vendor/github.com/google/go-github/v53/github/gists_comments.go index d551e9a11d..ee0fbfa45f 100644 --- a/vendor/github.com/google/go-github/v45/github/gists_comments.go +++ b/vendor/github.com/google/go-github/v53/github/gists_comments.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // GistComment represents a Gist comment. @@ -17,7 +16,7 @@ type GistComment struct { URL *string `json:"url,omitempty"` Body *string `json:"body,omitempty"` User *User `json:"user,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` } func (g GistComment) String() string { diff --git a/vendor/github.com/google/go-github/v45/github/git.go b/vendor/github.com/google/go-github/v53/github/git.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/git.go rename to vendor/github.com/google/go-github/v53/github/git.go diff --git a/vendor/github.com/google/go-github/v45/github/git_blobs.go b/vendor/github.com/google/go-github/v53/github/git_blobs.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/git_blobs.go rename to vendor/github.com/google/go-github/v53/github/git_blobs.go diff --git a/vendor/github.com/google/go-github/v45/github/git_commits.go b/vendor/github.com/google/go-github/v53/github/git_commits.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/git_commits.go rename to vendor/github.com/google/go-github/v53/github/git_commits.go index baedb3d686..862837c0d5 100644 --- a/vendor/github.com/google/go-github/v45/github/git_commits.go +++ b/vendor/github.com/google/go-github/v53/github/git_commits.go @@ -11,9 +11,8 @@ import ( "errors" "fmt" "strings" - "time" - "golang.org/x/crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp" ) // SignatureVerification represents GPG signature verification. @@ -56,7 +55,7 @@ func (c Commit) String() string { // CommitAuthor represents the author or committer of a commit. The commit // author may not correspond to a GitHub User. 
type CommitAuthor struct { - Date *time.Time `json:"date,omitempty"` + Date *Timestamp `json:"date,omitempty"` Name *string `json:"name,omitempty"` Email *string `json:"email,omitempty"` diff --git a/vendor/github.com/google/go-github/v45/github/git_refs.go b/vendor/github.com/google/go-github/v53/github/git_refs.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/git_refs.go rename to vendor/github.com/google/go-github/v53/github/git_refs.go index 883975cc0f..e839c30f66 100644 --- a/vendor/github.com/google/go-github/v45/github/git_refs.go +++ b/vendor/github.com/google/go-github/v53/github/git_refs.go @@ -142,7 +142,7 @@ func (s *GitService) CreateRef(ctx context.Context, owner string, repo string, r // GitHub API docs: https://docs.github.com/en/rest/git/refs#update-a-reference func (s *GitService) UpdateRef(ctx context.Context, owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) { refPath := strings.TrimPrefix(*ref.Ref, "refs/") - u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath) + u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refURLEscape(refPath)) req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{ SHA: ref.Object.SHA, Force: &force, diff --git a/vendor/github.com/google/go-github/v45/github/git_tags.go b/vendor/github.com/google/go-github/v53/github/git_tags.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/git_tags.go rename to vendor/github.com/google/go-github/v53/github/git_tags.go diff --git a/vendor/github.com/google/go-github/v45/github/git_trees.go b/vendor/github.com/google/go-github/v53/github/git_trees.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/git_trees.go rename to vendor/github.com/google/go-github/v53/github/git_trees.go diff --git a/vendor/github.com/google/go-github/v45/github/github-accessors.go b/vendor/github.com/google/go-github/v53/github/github-accessors.go similarity index 87% rename from vendor/github.com/google/go-github/v45/github/github-accessors.go rename to vendor/github.com/google/go-github/v53/github/github-accessors.go index 0092c58840..a9aaee814a 100644 --- a/vendor/github.com/google/go-github/v45/github/github-accessors.go +++ b/vendor/github.com/google/go-github/v53/github/github-accessors.go @@ -38,6 +38,94 @@ func (a *ActionsAllowed) GetVerifiedAllowed() bool { return *a.VerifiedAllowed } +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (a *ActionsCache) GetCreatedAt() Timestamp { + if a == nil || a.CreatedAt == nil { + return Timestamp{} + } + return *a.CreatedAt +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (a *ActionsCache) GetID() int64 { + if a == nil || a.ID == nil { + return 0 + } + return *a.ID +} + +// GetKey returns the Key field if it's non-nil, zero value otherwise. +func (a *ActionsCache) GetKey() string { + if a == nil || a.Key == nil { + return "" + } + return *a.Key +} + +// GetLastAccessedAt returns the LastAccessedAt field if it's non-nil, zero value otherwise. +func (a *ActionsCache) GetLastAccessedAt() Timestamp { + if a == nil || a.LastAccessedAt == nil { + return Timestamp{} + } + return *a.LastAccessedAt +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (a *ActionsCache) GetRef() string { + if a == nil || a.Ref == nil { + return "" + } + return *a.Ref +} + +// GetSizeInBytes returns the SizeInBytes field if it's non-nil, zero value otherwise. 
+func (a *ActionsCache) GetSizeInBytes() int64 { + if a == nil || a.SizeInBytes == nil { + return 0 + } + return *a.SizeInBytes +} + +// GetVersion returns the Version field if it's non-nil, zero value otherwise. +func (a *ActionsCache) GetVersion() string { + if a == nil || a.Version == nil { + return "" + } + return *a.Version +} + +// GetDirection returns the Direction field if it's non-nil, zero value otherwise. +func (a *ActionsCacheListOptions) GetDirection() string { + if a == nil || a.Direction == nil { + return "" + } + return *a.Direction +} + +// GetKey returns the Key field if it's non-nil, zero value otherwise. +func (a *ActionsCacheListOptions) GetKey() string { + if a == nil || a.Key == nil { + return "" + } + return *a.Key +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (a *ActionsCacheListOptions) GetRef() string { + if a == nil || a.Ref == nil { + return "" + } + return *a.Ref +} + +// GetSort returns the Sort field if it's non-nil, zero value otherwise. +func (a *ActionsCacheListOptions) GetSort() string { + if a == nil || a.Sort == nil { + return "" + } + return *a.Sort +} + // GetAllowedActions returns the AllowedActions field if it's non-nil, zero value otherwise. func (a *ActionsPermissions) GetAllowedActions() string { if a == nil || a.AllowedActions == nil { @@ -86,6 +174,62 @@ func (a *ActionsPermissionsRepository) GetSelectedActionsURL() string { return *a.SelectedActionsURL } +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (a *ActionsVariable) GetCreatedAt() Timestamp { + if a == nil || a.CreatedAt == nil { + return Timestamp{} + } + return *a.CreatedAt +} + +// GetSelectedRepositoriesURL returns the SelectedRepositoriesURL field if it's non-nil, zero value otherwise. +func (a *ActionsVariable) GetSelectedRepositoriesURL() string { + if a == nil || a.SelectedRepositoriesURL == nil { + return "" + } + return *a.SelectedRepositoriesURL +} + +// GetSelectedRepositoryIDs returns the SelectedRepositoryIDs field. +func (a *ActionsVariable) GetSelectedRepositoryIDs() *SelectedRepoIDs { + if a == nil { + return nil + } + return a.SelectedRepositoryIDs +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (a *ActionsVariable) GetUpdatedAt() Timestamp { + if a == nil || a.UpdatedAt == nil { + return Timestamp{} + } + return *a.UpdatedAt +} + +// GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. +func (a *ActionsVariable) GetVisibility() string { + if a == nil || a.Visibility == nil { + return "" + } + return *a.Visibility +} + +// GetCountryCode returns the CountryCode field if it's non-nil, zero value otherwise. +func (a *ActorLocation) GetCountryCode() string { + if a == nil || a.CountryCode == nil { + return "" + } + return *a.CountryCode +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (a *AdminEnforcedChanges) GetFrom() bool { + if a == nil || a.From == nil { + return false + } + return *a.From +} + // GetURL returns the URL field if it's non-nil, zero value otherwise. func (a *AdminEnforcement) GetURL() string { if a == nil || a.URL == nil { @@ -198,6 +342,38 @@ func (a *AdvancedSecurityCommittersBreakdown) GetUserLogin() string { return *a.UserLogin } +// GetScore returns the Score field. +func (a *AdvisoryCVSs) GetScore() *float64 { + if a == nil { + return nil + } + return a.Score +} + +// GetVectorString returns the VectorString field if it's non-nil, zero value otherwise. 
+func (a *AdvisoryCVSs) GetVectorString() string { + if a == nil || a.VectorString == nil { + return "" + } + return *a.VectorString +} + +// GetCWEID returns the CWEID field if it's non-nil, zero value otherwise. +func (a *AdvisoryCWEs) GetCWEID() string { + if a == nil || a.CWEID == nil { + return "" + } + return *a.CWEID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (a *AdvisoryCWEs) GetName() string { + if a == nil || a.Name == nil { + return "" + } + return *a.Name +} + // GetType returns the Type field if it's non-nil, zero value otherwise. func (a *AdvisoryIdentifier) GetType() string { if a == nil || a.Type == nil { @@ -294,6 +470,14 @@ func (a *Alert) GetDismissedBy() *User { return a.DismissedBy } +// GetDismissedComment returns the DismissedComment field if it's non-nil, zero value otherwise. +func (a *Alert) GetDismissedComment() string { + if a == nil || a.DismissedComment == nil { + return "" + } + return *a.DismissedComment +} + // GetDismissedReason returns the DismissedReason field if it's non-nil, zero value otherwise. func (a *Alert) GetDismissedReason() string { if a == nil || a.DismissedReason == nil { @@ -414,6 +598,22 @@ func (a *Alert) GetURL() string { return *a.URL } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (a *AllowDeletionsEnforcementLevelChanges) GetFrom() string { + if a == nil || a.From == nil { + return "" + } + return *a.From +} + +// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. +func (a *AllowForkSyncing) GetEnabled() bool { + if a == nil || a.Enabled == nil { + return false + } + return *a.Enabled +} + // GetRef returns the Ref field if it's non-nil, zero value otherwise. func (a *AnalysesListOptions) GetRef() string { if a == nil || a.Ref == nil { @@ -486,6 +686,14 @@ func (a *App) GetID() int64 { return *a.ID } +// GetInstallationsCount returns the InstallationsCount field if it's non-nil, zero value otherwise. +func (a *App) GetInstallationsCount() int { + if a == nil || a.InstallationsCount == nil { + return 0 + } + return *a.InstallationsCount +} + // GetName returns the Name field if it's non-nil, zero value otherwise. func (a *App) GetName() string { if a == nil || a.Name == nil { @@ -710,6 +918,30 @@ func (a *Artifact) GetSizeInBytes() int64 { return *a.SizeInBytes } +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (a *Artifact) GetUpdatedAt() Timestamp { + if a == nil || a.UpdatedAt == nil { + return Timestamp{} + } + return *a.UpdatedAt +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (a *Artifact) GetURL() string { + if a == nil || a.URL == nil { + return "" + } + return *a.URL +} + +// GetWorkflowRun returns the WorkflowRun field. +func (a *Artifact) GetWorkflowRun() *ArtifactWorkflowRun { + if a == nil { + return nil + } + return a.WorkflowRun +} + // GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. func (a *ArtifactList) GetTotalCount() int64 { if a == nil || a.TotalCount == nil { @@ -718,6 +950,46 @@ func (a *ArtifactList) GetTotalCount() int64 { return *a.TotalCount } +// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. +func (a *ArtifactWorkflowRun) GetHeadBranch() string { + if a == nil || a.HeadBranch == nil { + return "" + } + return *a.HeadBranch +} + +// GetHeadRepositoryID returns the HeadRepositoryID field if it's non-nil, zero value otherwise. 
+func (a *ArtifactWorkflowRun) GetHeadRepositoryID() int64 { + if a == nil || a.HeadRepositoryID == nil { + return 0 + } + return *a.HeadRepositoryID +} + +// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. +func (a *ArtifactWorkflowRun) GetHeadSHA() string { + if a == nil || a.HeadSHA == nil { + return "" + } + return *a.HeadSHA +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (a *ArtifactWorkflowRun) GetID() int64 { + if a == nil || a.ID == nil { + return 0 + } + return *a.ID +} + +// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. +func (a *ArtifactWorkflowRun) GetRepositoryID() int64 { + if a == nil || a.RepositoryID == nil { + return 0 + } + return *a.RepositoryID +} + // GetBody returns the Body field if it's non-nil, zero value otherwise. func (a *Attachment) GetBody() string { if a == nil || a.Body == nil { @@ -774,6 +1046,22 @@ func (a *AuditEntry) GetActor() string { return *a.Actor } +// GetActorIP returns the ActorIP field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetActorIP() string { + if a == nil || a.ActorIP == nil { + return "" + } + return *a.ActorIP +} + +// GetActorLocation returns the ActorLocation field. +func (a *AuditEntry) GetActorLocation() *ActorLocation { + if a == nil { + return nil + } + return a.ActorLocation +} + // GetBlockedUser returns the BlockedUser field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetBlockedUser() string { if a == nil || a.BlockedUser == nil { @@ -902,6 +1190,14 @@ func (a *AuditEntry) GetFingerprint() string { return *a.Fingerprint } +// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetHashedToken() string { + if a == nil || a.HashedToken == nil { + return "" + } + return *a.HashedToken +} + // GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetHeadBranch() string { if a == nil || a.HeadBranch == nil { @@ -942,6 +1238,14 @@ func (a *AuditEntry) GetJobName() string { return *a.JobName } +// GetJobWorkflowRef returns the JobWorkflowRef field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetJobWorkflowRef() string { + if a == nil || a.JobWorkflowRef == nil { + return "" + } + return *a.JobWorkflowRef +} + // GetLimitedAvailability returns the LimitedAvailability field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetLimitedAvailability() bool { if a == nil || a.LimitedAvailability == nil { @@ -966,6 +1270,22 @@ func (a *AuditEntry) GetName() string { return *a.Name } +// GetOAuthApplicationID returns the OAuthApplicationID field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetOAuthApplicationID() int64 { + if a == nil || a.OAuthApplicationID == nil { + return 0 + } + return *a.OAuthApplicationID +} + +// GetOldPermission returns the OldPermission field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetOldPermission() string { + if a == nil || a.OldPermission == nil { + return "" + } + return *a.OldPermission +} + // GetOldUser returns the OldUser field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetOldUser() string { if a == nil || a.OldUser == nil { @@ -982,6 +1302,14 @@ func (a *AuditEntry) GetOpenSSHPublicKey() string { return *a.OpenSSHPublicKey } +// GetOperationType returns the OperationType field if it's non-nil, zero value otherwise. 
+func (a *AuditEntry) GetOperationType() string { + if a == nil || a.OperationType == nil { + return "" + } + return *a.OperationType +} + // GetOrg returns the Org field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetOrg() string { if a == nil || a.Org == nil { @@ -990,6 +1318,22 @@ func (a *AuditEntry) GetOrg() string { return *a.Org } +// GetOrgID returns the OrgID field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetOrgID() int64 { + if a == nil || a.OrgID == nil { + return 0 + } + return *a.OrgID +} + +// GetPermission returns the Permission field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetPermission() string { + if a == nil || a.Permission == nil { + return "" + } + return *a.Permission +} + // GetPreviousVisibility returns the PreviousVisibility field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetPreviousVisibility() string { if a == nil || a.PreviousVisibility == nil { @@ -998,6 +1342,38 @@ func (a *AuditEntry) GetPreviousVisibility() string { return *a.PreviousVisibility } +// GetProgrammaticAccessType returns the ProgrammaticAccessType field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetProgrammaticAccessType() string { + if a == nil || a.ProgrammaticAccessType == nil { + return "" + } + return *a.ProgrammaticAccessType +} + +// GetPullRequestID returns the PullRequestID field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetPullRequestID() int64 { + if a == nil || a.PullRequestID == nil { + return 0 + } + return *a.PullRequestID +} + +// GetPullRequestTitle returns the PullRequestTitle field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetPullRequestTitle() string { + if a == nil || a.PullRequestTitle == nil { + return "" + } + return *a.PullRequestTitle +} + +// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetPullRequestURL() string { + if a == nil || a.PullRequestURL == nil { + return "" + } + return *a.PullRequestURL +} + // GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetReadOnly() string { if a == nil || a.ReadOnly == nil { @@ -1030,6 +1406,14 @@ func (a *AuditEntry) GetRepositoryPublic() bool { return *a.RepositoryPublic } +// GetRunAttempt returns the RunAttempt field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetRunAttempt() int64 { + if a == nil || a.RunAttempt == nil { + return 0 + } + return *a.RunAttempt +} + // GetRunnerGroupID returns the RunnerGroupID field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetRunnerGroupID() int64 { if a == nil || a.RunnerGroupID == nil { @@ -1062,6 +1446,14 @@ func (a *AuditEntry) GetRunnerName() string { return *a.RunnerName } +// GetRunNumber returns the RunNumber field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetRunNumber() int64 { + if a == nil || a.RunNumber == nil { + return 0 + } + return *a.RunNumber +} + // GetSourceVersion returns the SourceVersion field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetSourceVersion() string { if a == nil || a.SourceVersion == nil { @@ -1110,6 +1502,30 @@ func (a *AuditEntry) GetTimestamp() Timestamp { return *a.Timestamp } +// GetTokenID returns the TokenID field if it's non-nil, zero value otherwise. 
+func (a *AuditEntry) GetTokenID() int64 { + if a == nil || a.TokenID == nil { + return 0 + } + return *a.TokenID +} + +// GetTokenScopes returns the TokenScopes field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetTokenScopes() string { + if a == nil || a.TokenScopes == nil { + return "" + } + return *a.TokenScopes +} + +// GetTopic returns the Topic field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetTopic() string { + if a == nil || a.Topic == nil { + return "" + } + return *a.Topic +} + // GetTransportProtocol returns the TransportProtocol field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetTransportProtocol() int { if a == nil || a.TransportProtocol == nil { @@ -1142,6 +1558,14 @@ func (a *AuditEntry) GetUser() string { return *a.User } +// GetUserAgent returns the UserAgent field if it's non-nil, zero value otherwise. +func (a *AuditEntry) GetUserAgent() string { + if a == nil || a.UserAgent == nil { + return "" + } + return *a.UserAgent +} + // GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. func (a *AuditEntry) GetVisibility() string { if a == nil || a.Visibility == nil { @@ -1358,6 +1782,14 @@ func (a *AuthorizedActorsOnly) GetFrom() bool { return *a.From } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (a *AuthorizedDismissalActorsOnlyChanges) GetFrom() bool { + if a == nil || a.From == nil { + return false + } + return *a.From +} + // GetID returns the ID field if it's non-nil, zero value otherwise. func (a *Autolink) GetID() int64 { if a == nil || a.ID == nil { @@ -1366,6 +1798,14 @@ func (a *Autolink) GetID() int64 { return *a.ID } +// GetIsAlphanumeric returns the IsAlphanumeric field if it's non-nil, zero value otherwise. +func (a *Autolink) GetIsAlphanumeric() bool { + if a == nil || a.IsAlphanumeric == nil { + return false + } + return *a.IsAlphanumeric +} + // GetKeyPrefix returns the KeyPrefix field if it's non-nil, zero value otherwise. func (a *Autolink) GetKeyPrefix() string { if a == nil || a.KeyPrefix == nil { @@ -1382,6 +1822,14 @@ func (a *Autolink) GetURLTemplate() string { return *a.URLTemplate } +// GetIsAlphanumeric returns the IsAlphanumeric field if it's non-nil, zero value otherwise. +func (a *AutolinkOptions) GetIsAlphanumeric() bool { + if a == nil || a.IsAlphanumeric == nil { + return false + } + return *a.IsAlphanumeric +} + // GetKeyPrefix returns the KeyPrefix field if it's non-nil, zero value otherwise. func (a *AutolinkOptions) GetKeyPrefix() string { if a == nil || a.KeyPrefix == nil { @@ -1462,6 +1910,14 @@ func (b *Blob) GetURL() string { return *b.URL } +// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. +func (b *BlockCreations) GetEnabled() bool { + if b == nil || b.Enabled == nil { + return false + } + return *b.Enabled +} + // GetCommit returns the Commit field. func (b *Branch) GetCommit() *RepositoryCommit { if b == nil { @@ -2286,6 +2742,14 @@ func (c *CodeOfConduct) GetURL() string { return *c.URL } +// GetSuggestion returns the Suggestion field if it's non-nil, zero value otherwise. +func (c *CodeownersError) GetSuggestion() string { + if c == nil || c.Suggestion == nil { + return "" + } + return *c.Suggestion +} + // GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. 
func (c *CodeResult) GetHTMLURL() string { if c == nil || c.HTMLURL == nil { @@ -2350,6 +2814,14 @@ func (c *CodeScanningAlertEvent) GetCommitOID() string { return *c.CommitOID } +// GetInstallation returns the Installation field. +func (c *CodeScanningAlertEvent) GetInstallation() *Installation { + if c == nil { + return nil + } + return c.Installation +} + // GetOrg returns the Org field. func (c *CodeScanningAlertEvent) GetOrg() *Organization { if c == nil { @@ -2382,6 +2854,22 @@ func (c *CodeScanningAlertEvent) GetSender() *User { return c.Sender } +// GetDismissedComment returns the DismissedComment field if it's non-nil, zero value otherwise. +func (c *CodeScanningAlertState) GetDismissedComment() string { + if c == nil || c.DismissedComment == nil { + return "" + } + return *c.DismissedComment +} + +// GetDismissedReason returns the DismissedReason field if it's non-nil, zero value otherwise. +func (c *CodeScanningAlertState) GetDismissedReason() string { + if c == nil || c.DismissedReason == nil { + return "" + } + return *c.DismissedReason +} + // GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. func (c *CodeSearchResult) GetIncompleteResults() bool { if c == nil || c.IncompleteResults == nil { @@ -2507,15 +2995,119 @@ func (c *CombinedStatus) GetTotalCount() int { if c == nil || c.TotalCount == nil { return 0 } - return *c.TotalCount + return *c.TotalCount +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (c *Comment) GetCreatedAt() Timestamp { + if c == nil || c.CreatedAt == nil { + return Timestamp{} + } + return *c.CreatedAt +} + +// GetAuthorAssociation returns the AuthorAssociation field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetAuthorAssociation() string { + if c == nil || c.AuthorAssociation == nil { + return "" + } + return *c.AuthorAssociation +} + +// GetBody returns the Body field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetBody() string { + if c == nil || c.Body == nil { + return "" + } + return *c.Body +} + +// GetChildCommentCount returns the ChildCommentCount field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetChildCommentCount() int { + if c == nil || c.ChildCommentCount == nil { + return 0 + } + return *c.ChildCommentCount +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetCreatedAt() Timestamp { + if c == nil || c.CreatedAt == nil { + return Timestamp{} + } + return *c.CreatedAt +} + +// GetDiscussionID returns the DiscussionID field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetDiscussionID() int64 { + if c == nil || c.DiscussionID == nil { + return 0 + } + return *c.DiscussionID +} + +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetHTMLURL() string { + if c == nil || c.HTMLURL == nil { + return "" + } + return *c.HTMLURL +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetID() int64 { + if c == nil || c.ID == nil { + return 0 + } + return *c.ID +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetNodeID() string { + if c == nil || c.NodeID == nil { + return "" + } + return *c.NodeID +} + +// GetParentID returns the ParentID field if it's non-nil, zero value otherwise. 
+func (c *CommentDiscussion) GetParentID() int64 { + if c == nil || c.ParentID == nil { + return 0 + } + return *c.ParentID +} + +// GetReactions returns the Reactions field. +func (c *CommentDiscussion) GetReactions() *Reactions { + if c == nil { + return nil + } + return c.Reactions +} + +// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetRepositoryURL() string { + if c == nil || c.RepositoryURL == nil { + return "" + } + return *c.RepositoryURL +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (c *CommentDiscussion) GetUpdatedAt() Timestamp { + if c == nil || c.UpdatedAt == nil { + return Timestamp{} + } + return *c.UpdatedAt } -// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (c *Comment) GetCreatedAt() time.Time { - if c == nil || c.CreatedAt == nil { - return time.Time{} +// GetUser returns the User field. +func (c *CommentDiscussion) GetUser() *User { + if c == nil { + return nil } - return *c.CreatedAt + return c.User } // GetTotalCommitComments returns the TotalCommitComments field if it's non-nil, zero value otherwise. @@ -2639,9 +3231,9 @@ func (c *Commit) GetVerification() *SignatureVerification { } // GetDate returns the Date field if it's non-nil, zero value otherwise. -func (c *CommitAuthor) GetDate() time.Time { +func (c *CommitAuthor) GetDate() Timestamp { if c == nil || c.Date == nil { - return time.Time{} + return Timestamp{} } return *c.Date } @@ -3095,9 +3687,9 @@ func (c *CommunityHealthMetrics) GetHealthPercentage() int { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (c *CommunityHealthMetrics) GetUpdatedAt() time.Time { +func (c *CommunityHealthMetrics) GetUpdatedAt() Timestamp { if c == nil || c.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *c.UpdatedAt } @@ -3510,6 +4102,38 @@ func (c *CreateOrgInvitationOptions) GetRole() string { return *c.Role } +// GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. +func (c *CreateOrUpdateCustomRoleOptions) GetBaseRole() string { + if c == nil || c.BaseRole == nil { + return "" + } + return *c.BaseRole +} + +// GetDescription returns the Description field if it's non-nil, zero value otherwise. +func (c *CreateOrUpdateCustomRoleOptions) GetDescription() string { + if c == nil || c.Description == nil { + return "" + } + return *c.Description +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *CreateOrUpdateCustomRoleOptions) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (c *CreateProtectedChanges) GetFrom() bool { + if c == nil || c.From == nil { + return false + } + return *c.From +} + // GetAllowsPublicRepositories returns the AllowsPublicRepositories field if it's non-nil, zero value otherwise. func (c *CreateRunnerGroupRequest) GetAllowsPublicRepositories() bool { if c == nil || c.AllowsPublicRepositories == nil { @@ -3526,6 +4150,14 @@ func (c *CreateRunnerGroupRequest) GetName() string { return *c.Name } +// GetRestrictedToWorkflows returns the RestrictedToWorkflows field if it's non-nil, zero value otherwise. 
+func (c *CreateRunnerGroupRequest) GetRestrictedToWorkflows() bool { + if c == nil || c.RestrictedToWorkflows == nil { + return false + } + return *c.RestrictedToWorkflows +} + // GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. func (c *CreateRunnerGroupRequest) GetVisibility() string { if c == nil || c.Visibility == nil { @@ -3534,92 +4166,364 @@ func (c *CreateRunnerGroupRequest) GetVisibility() string { return *c.Visibility } +// GetCanAdminsBypass returns the CanAdminsBypass field if it's non-nil, zero value otherwise. +func (c *CreateUpdateEnvironment) GetCanAdminsBypass() bool { + if c == nil || c.CanAdminsBypass == nil { + return false + } + return *c.CanAdminsBypass +} + // GetDeploymentBranchPolicy returns the DeploymentBranchPolicy field. func (c *CreateUpdateEnvironment) GetDeploymentBranchPolicy() *BranchPolicy { if c == nil { return nil } - return c.DeploymentBranchPolicy + return c.DeploymentBranchPolicy +} + +// GetWaitTimer returns the WaitTimer field if it's non-nil, zero value otherwise. +func (c *CreateUpdateEnvironment) GetWaitTimer() int { + if c == nil || c.WaitTimer == nil { + return 0 + } + return *c.WaitTimer +} + +// GetRepositoryID returns the RepositoryID field if it's non-nil, zero value otherwise. +func (c *CreateUpdateRequiredWorkflowOptions) GetRepositoryID() int64 { + if c == nil || c.RepositoryID == nil { + return 0 + } + return *c.RepositoryID +} + +// GetScope returns the Scope field if it's non-nil, zero value otherwise. +func (c *CreateUpdateRequiredWorkflowOptions) GetScope() string { + if c == nil || c.Scope == nil { + return "" + } + return *c.Scope +} + +// GetSelectedRepositoryIDs returns the SelectedRepositoryIDs field. +func (c *CreateUpdateRequiredWorkflowOptions) GetSelectedRepositoryIDs() *SelectedRepoIDs { + if c == nil { + return nil + } + return c.SelectedRepositoryIDs +} + +// GetWorkflowFilePath returns the WorkflowFilePath field if it's non-nil, zero value otherwise. +func (c *CreateUpdateRequiredWorkflowOptions) GetWorkflowFilePath() string { + if c == nil || c.WorkflowFilePath == nil { + return "" + } + return *c.WorkflowFilePath +} + +// GetBody returns the Body field if it's non-nil, zero value otherwise. +func (c *CreateUserProjectOptions) GetBody() string { + if c == nil || c.Body == nil { + return "" + } + return *c.Body +} + +// GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetBaseRole() string { + if c == nil || c.BaseRole == nil { + return "" + } + return *c.BaseRole +} + +// GetDescription returns the Description field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetDescription() string { + if c == nil || c.Description == nil { + return "" + } + return *c.Description +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetID() int64 { + if c == nil || c.ID == nil { + return 0 + } + return *c.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + +// GetInstallation returns the Installation field. +func (d *DeleteEvent) GetInstallation() *Installation { + if d == nil { + return nil + } + return d.Installation +} + +// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. 
+func (d *DeleteEvent) GetPusherType() string { + if d == nil || d.PusherType == nil { + return "" + } + return *d.PusherType +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (d *DeleteEvent) GetRef() string { + if d == nil || d.Ref == nil { + return "" + } + return *d.Ref +} + +// GetRefType returns the RefType field if it's non-nil, zero value otherwise. +func (d *DeleteEvent) GetRefType() string { + if d == nil || d.RefType == nil { + return "" + } + return *d.RefType +} + +// GetRepo returns the Repo field. +func (d *DeleteEvent) GetRepo() *Repository { + if d == nil { + return nil + } + return d.Repo +} + +// GetSender returns the Sender field. +func (d *DeleteEvent) GetSender() *User { + if d == nil { + return nil + } + return d.Sender +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetCreatedAt() Timestamp { + if d == nil || d.CreatedAt == nil { + return Timestamp{} + } + return *d.CreatedAt +} + +// GetDependency returns the Dependency field. +func (d *DependabotAlert) GetDependency() *Dependency { + if d == nil { + return nil + } + return d.Dependency +} + +// GetDismissedAt returns the DismissedAt field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetDismissedAt() Timestamp { + if d == nil || d.DismissedAt == nil { + return Timestamp{} + } + return *d.DismissedAt +} + +// GetDismissedBy returns the DismissedBy field. +func (d *DependabotAlert) GetDismissedBy() *User { + if d == nil { + return nil + } + return d.DismissedBy +} + +// GetDismissedComment returns the DismissedComment field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetDismissedComment() string { + if d == nil || d.DismissedComment == nil { + return "" + } + return *d.DismissedComment +} + +// GetDismissedReason returns the DismissedReason field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetDismissedReason() string { + if d == nil || d.DismissedReason == nil { + return "" + } + return *d.DismissedReason +} + +// GetFixedAt returns the FixedAt field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetFixedAt() Timestamp { + if d == nil || d.FixedAt == nil { + return Timestamp{} + } + return *d.FixedAt +} + +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetHTMLURL() string { + if d == nil || d.HTMLURL == nil { + return "" + } + return *d.HTMLURL +} + +// GetNumber returns the Number field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetNumber() int { + if d == nil || d.Number == nil { + return 0 + } + return *d.Number +} + +// GetRepository returns the Repository field. +func (d *DependabotAlert) GetRepository() *Repository { + if d == nil { + return nil + } + return d.Repository +} + +// GetSecurityAdvisory returns the SecurityAdvisory field. +func (d *DependabotAlert) GetSecurityAdvisory() *DependabotSecurityAdvisory { + if d == nil { + return nil + } + return d.SecurityAdvisory +} + +// GetSecurityVulnerability returns the SecurityVulnerability field. +func (d *DependabotAlert) GetSecurityVulnerability() *AdvisoryVulnerability { + if d == nil { + return nil + } + return d.SecurityVulnerability +} + +// GetState returns the State field if it's non-nil, zero value otherwise. 
+func (d *DependabotAlert) GetState() string { + if d == nil || d.State == nil { + return "" + } + return *d.State +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetUpdatedAt() Timestamp { + if d == nil || d.UpdatedAt == nil { + return Timestamp{} + } + return *d.UpdatedAt +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (d *DependabotAlert) GetURL() string { + if d == nil || d.URL == nil { + return "" + } + return *d.URL +} + +// GetCVEID returns the CVEID field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetCVEID() string { + if d == nil || d.CVEID == nil { + return "" + } + return *d.CVEID +} + +// GetCVSs returns the CVSs field. +func (d *DependabotSecurityAdvisory) GetCVSs() *AdvisoryCVSs { + if d == nil { + return nil + } + return d.CVSs } -// GetWaitTimer returns the WaitTimer field if it's non-nil, zero value otherwise. -func (c *CreateUpdateEnvironment) GetWaitTimer() int { - if c == nil || c.WaitTimer == nil { - return 0 +// GetDescription returns the Description field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetDescription() string { + if d == nil || d.Description == nil { + return "" } - return *c.WaitTimer + return *d.Description } -// GetBody returns the Body field if it's non-nil, zero value otherwise. -func (c *CreateUserProjectOptions) GetBody() string { - if c == nil || c.Body == nil { +// GetGHSAID returns the GHSAID field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetGHSAID() string { + if d == nil || d.GHSAID == nil { return "" } - return *c.Body + return *d.GHSAID } -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (c *CustomRepoRoles) GetID() int64 { - if c == nil || c.ID == nil { - return 0 +// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetPublishedAt() Timestamp { + if d == nil || d.PublishedAt == nil { + return Timestamp{} } - return *c.ID + return *d.PublishedAt } -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CustomRepoRoles) GetName() string { - if c == nil || c.Name == nil { +// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetSeverity() string { + if d == nil || d.Severity == nil { return "" } - return *c.Name + return *d.Severity } -// GetInstallation returns the Installation field. -func (d *DeleteEvent) GetInstallation() *Installation { - if d == nil { - return nil +// GetSummary returns the Summary field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetSummary() string { + if d == nil || d.Summary == nil { + return "" } - return d.Installation + return *d.Summary } -// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetPusherType() string { - if d == nil || d.PusherType == nil { - return "" +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetUpdatedAt() Timestamp { + if d == nil || d.UpdatedAt == nil { + return Timestamp{} } - return *d.PusherType + return *d.UpdatedAt } -// GetRef returns the Ref field if it's non-nil, zero value otherwise. 
-func (d *DeleteEvent) GetRef() string { - if d == nil || d.Ref == nil { - return "" +// GetWithdrawnAt returns the WithdrawnAt field if it's non-nil, zero value otherwise. +func (d *DependabotSecurityAdvisory) GetWithdrawnAt() Timestamp { + if d == nil || d.WithdrawnAt == nil { + return Timestamp{} } - return *d.Ref + return *d.WithdrawnAt } -// GetRefType returns the RefType field if it's non-nil, zero value otherwise. -func (d *DeleteEvent) GetRefType() string { - if d == nil || d.RefType == nil { +// GetManifestPath returns the ManifestPath field if it's non-nil, zero value otherwise. +func (d *Dependency) GetManifestPath() string { + if d == nil || d.ManifestPath == nil { return "" } - return *d.RefType + return *d.ManifestPath } -// GetRepo returns the Repo field. -func (d *DeleteEvent) GetRepo() *Repository { +// GetPackage returns the Package field. +func (d *Dependency) GetPackage() *VulnerabilityPackage { if d == nil { return nil } - return d.Repo + return d.Package } -// GetSender returns the Sender field. -func (d *DeleteEvent) GetSender() *User { - if d == nil { - return nil +// GetScope returns the Scope field if it's non-nil, zero value otherwise. +func (d *Dependency) GetScope() string { + if d == nil || d.Scope == nil { + return "" } - return d.Sender + return *d.Scope } // GetAction returns the Action field if it's non-nil, zero value otherwise. @@ -3774,6 +4678,46 @@ func (d *Deployment) GetURL() string { return *d.URL } +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (d *DeploymentBranchPolicy) GetID() int64 { + if d == nil || d.ID == nil { + return 0 + } + return *d.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (d *DeploymentBranchPolicy) GetName() string { + if d == nil || d.Name == nil { + return "" + } + return *d.Name +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (d *DeploymentBranchPolicy) GetNodeID() string { + if d == nil || d.NodeID == nil { + return "" + } + return *d.NodeID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (d *DeploymentBranchPolicyRequest) GetName() string { + if d == nil || d.Name == nil { + return "" + } + return *d.Name +} + +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (d *DeploymentBranchPolicyResponse) GetTotalCount() int { + if d == nil || d.TotalCount == nil { + return 0 + } + return *d.TotalCount +} + // GetDeployment returns the Deployment field. func (d *DeploymentEvent) GetDeployment() *Deployment { if d == nil { @@ -3806,6 +4750,78 @@ func (d *DeploymentEvent) GetSender() *User { return d.Sender } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (d *DeploymentProtectionRuleEvent) GetAction() string { + if d == nil || d.Action == nil { + return "" + } + return *d.Action +} + +// GetDeployment returns the Deployment field. +func (d *DeploymentProtectionRuleEvent) GetDeployment() *Deployment { + if d == nil { + return nil + } + return d.Deployment +} + +// GetDeploymentCallbackURL returns the DeploymentCallbackURL field if it's non-nil, zero value otherwise. +func (d *DeploymentProtectionRuleEvent) GetDeploymentCallbackURL() string { + if d == nil || d.DeploymentCallbackURL == nil { + return "" + } + return *d.DeploymentCallbackURL +} + +// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. 
+func (d *DeploymentProtectionRuleEvent) GetEnvironment() string { + if d == nil || d.Environment == nil { + return "" + } + return *d.Environment +} + +// GetEvent returns the Event field if it's non-nil, zero value otherwise. +func (d *DeploymentProtectionRuleEvent) GetEvent() string { + if d == nil || d.Event == nil { + return "" + } + return *d.Event +} + +// GetInstallation returns the Installation field. +func (d *DeploymentProtectionRuleEvent) GetInstallation() *Installation { + if d == nil { + return nil + } + return d.Installation +} + +// GetOrganization returns the Organization field. +func (d *DeploymentProtectionRuleEvent) GetOrganization() *Organization { + if d == nil { + return nil + } + return d.Organization +} + +// GetRepo returns the Repo field. +func (d *DeploymentProtectionRuleEvent) GetRepo() *Repository { + if d == nil { + return nil + } + return d.Repo +} + +// GetSender returns the Sender field. +func (d *DeploymentProtectionRuleEvent) GetSender() *User { + if d == nil { + return nil + } + return d.Sender +} + // GetAutoMerge returns the AutoMerge field if it's non-nil, zero value otherwise. func (d *DeploymentRequest) GetAutoMerge() bool { if d == nil || d.AutoMerge == nil { @@ -4406,6 +5422,62 @@ func (d *DiscussionComment) GetURL() string { return *d.URL } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (d *DiscussionCommentEvent) GetAction() string { + if d == nil || d.Action == nil { + return "" + } + return *d.Action +} + +// GetComment returns the Comment field. +func (d *DiscussionCommentEvent) GetComment() *CommentDiscussion { + if d == nil { + return nil + } + return d.Comment +} + +// GetDiscussion returns the Discussion field. +func (d *DiscussionCommentEvent) GetDiscussion() *Discussion { + if d == nil { + return nil + } + return d.Discussion +} + +// GetInstallation returns the Installation field. +func (d *DiscussionCommentEvent) GetInstallation() *Installation { + if d == nil { + return nil + } + return d.Installation +} + +// GetOrg returns the Org field. +func (d *DiscussionCommentEvent) GetOrg() *Organization { + if d == nil { + return nil + } + return d.Org +} + +// GetRepo returns the Repo field. +func (d *DiscussionCommentEvent) GetRepo() *Repository { + if d == nil { + return nil + } + return d.Repo +} + +// GetSender returns the Sender field. +func (d *DiscussionCommentEvent) GetSender() *User { + if d == nil { + return nil + } + return d.Sender +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (d *DiscussionEvent) GetAction() string { if d == nil || d.Action == nil { @@ -4454,6 +5526,14 @@ func (d *DiscussionEvent) GetSender() *User { return d.Sender } +// GetApps returns the Apps field if it's non-nil, zero value otherwise. +func (d *DismissalRestrictionsRequest) GetApps() []string { + if d == nil || d.Apps == nil { + return nil + } + return *d.Apps +} + // GetTeams returns the Teams field if it's non-nil, zero value otherwise. func (d *DismissalRestrictionsRequest) GetTeams() []string { if d == nil || d.Teams == nil { @@ -4502,6 +5582,14 @@ func (d *DismissedReview) GetState() string { return *d.State } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (d *DismissStaleReviewsOnPushChanges) GetFrom() bool { + if d == nil || d.From == nil { + return false + } + return *d.From +} + // GetClientPayload returns the ClientPayload field if it's non-nil, zero value otherwise. 
func (d *DispatchRequestOptions) GetClientPayload() json.RawMessage { if d == nil || d.ClientPayload == nil { @@ -4606,6 +5694,14 @@ func (e *EditChange) GetBody() *EditBody { return e.Body } +// GetOwner returns the Owner field. +func (e *EditChange) GetOwner() *EditOwner { + if e == nil { + return nil + } + return e.Owner +} + // GetRepo returns the Repo field. func (e *EditChange) GetRepo() *EditRepo { if e == nil { @@ -4622,6 +5718,14 @@ func (e *EditChange) GetTitle() *EditTitle { return e.Title } +// GetOwnerInfo returns the OwnerInfo field. +func (e *EditOwner) GetOwnerInfo() *OwnerInfo { + if e == nil { + return nil + } + return e.OwnerInfo +} + // GetFrom returns the From field if it's non-nil, zero value otherwise. func (e *EditRef) GetFrom() string { if e == nil || e.From == nil { @@ -4734,6 +5838,46 @@ func (e *Enterprise) GetWebsiteURL() string { return *e.WebsiteURL } +// GetAdvancedSecurityEnabledForNewRepositories returns the AdvancedSecurityEnabledForNewRepositories field if it's non-nil, zero value otherwise. +func (e *EnterpriseSecurityAnalysisSettings) GetAdvancedSecurityEnabledForNewRepositories() bool { + if e == nil || e.AdvancedSecurityEnabledForNewRepositories == nil { + return false + } + return *e.AdvancedSecurityEnabledForNewRepositories +} + +// GetSecretScanningEnabledForNewRepositories returns the SecretScanningEnabledForNewRepositories field if it's non-nil, zero value otherwise. +func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningEnabledForNewRepositories() bool { + if e == nil || e.SecretScanningEnabledForNewRepositories == nil { + return false + } + return *e.SecretScanningEnabledForNewRepositories +} + +// GetSecretScanningPushProtectionCustomLink returns the SecretScanningPushProtectionCustomLink field if it's non-nil, zero value otherwise. +func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningPushProtectionCustomLink() string { + if e == nil || e.SecretScanningPushProtectionCustomLink == nil { + return "" + } + return *e.SecretScanningPushProtectionCustomLink +} + +// GetSecretScanningPushProtectionEnabledForNewRepositories returns the SecretScanningPushProtectionEnabledForNewRepositories field if it's non-nil, zero value otherwise. +func (e *EnterpriseSecurityAnalysisSettings) GetSecretScanningPushProtectionEnabledForNewRepositories() bool { + if e == nil || e.SecretScanningPushProtectionEnabledForNewRepositories == nil { + return false + } + return *e.SecretScanningPushProtectionEnabledForNewRepositories +} + +// GetCanAdminsBypass returns the CanAdminsBypass field if it's non-nil, zero value otherwise. +func (e *Environment) GetCanAdminsBypass() bool { + if e == nil || e.CanAdminsBypass == nil { + return false + } + return *e.CanAdminsBypass +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (e *Environment) GetCreatedAt() Timestamp { if e == nil || e.CreatedAt == nil { @@ -4879,9 +6023,9 @@ func (e *Event) GetActor() *User { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (e *Event) GetCreatedAt() time.Time { +func (e *Event) GetCreatedAt() Timestamp { if e == nil || e.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *e.CreatedAt } @@ -5215,9 +6359,9 @@ func (g *Gist) GetComments() int { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (g *Gist) GetCreatedAt() time.Time { +func (g *Gist) GetCreatedAt() Timestamp { if g == nil || g.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *g.CreatedAt } @@ -5295,9 +6439,9 @@ func (g *Gist) GetPublic() bool { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (g *Gist) GetUpdatedAt() time.Time { +func (g *Gist) GetUpdatedAt() Timestamp { if g == nil || g.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *g.UpdatedAt } @@ -5311,9 +6455,9 @@ func (g *GistComment) GetBody() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GistComment) GetCreatedAt() time.Time { +func (g *GistComment) GetCreatedAt() Timestamp { if g == nil || g.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *g.CreatedAt } @@ -5647,17 +6791,17 @@ func (g *GPGKey) GetCanSign() bool { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetCreatedAt() time.Time { +func (g *GPGKey) GetCreatedAt() Timestamp { if g == nil || g.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *g.CreatedAt } // GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (g *GPGKey) GetExpiresAt() time.Time { +func (g *GPGKey) GetExpiresAt() Timestamp { if g == nil || g.ExpiresAt == nil { - return time.Time{} + return Timestamp{} } return *g.ExpiresAt } @@ -5823,9 +6967,9 @@ func (h *Hook) GetActive() bool { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (h *Hook) GetCreatedAt() time.Time { +func (h *Hook) GetCreatedAt() Timestamp { if h == nil || h.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *h.CreatedAt } @@ -5871,9 +7015,9 @@ func (h *Hook) GetType() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (h *Hook) GetUpdatedAt() time.Time { +func (h *Hook) GetUpdatedAt() Timestamp { if h == nil || h.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *h.UpdatedAt } @@ -6438,6 +7582,14 @@ func (i *InstallationEvent) GetInstallation() *Installation { return i.Installation } +// GetRequester returns the Requester field. +func (i *InstallationEvent) GetRequester() *User { + if i == nil { + return nil + } + return i.Requester +} + // GetSender returns the Sender field. func (i *InstallationEvent) GetSender() *User { if i == nil { @@ -6558,6 +7710,14 @@ func (i *InstallationPermissions) GetOrganizationAdministration() string { return *i.OrganizationAdministration } +// GetOrganizationCustomRoles returns the OrganizationCustomRoles field if it's non-nil, zero value otherwise. +func (i *InstallationPermissions) GetOrganizationCustomRoles() string { + if i == nil || i.OrganizationCustomRoles == nil { + return "" + } + return *i.OrganizationCustomRoles +} + // GetOrganizationHooks returns the OrganizationHooks field if it's non-nil, zero value otherwise. func (i *InstallationPermissions) GetOrganizationHooks() string { if i == nil || i.OrganizationHooks == nil { @@ -6566,6 +7726,14 @@ func (i *InstallationPermissions) GetOrganizationHooks() string { return *i.OrganizationHooks } +// GetOrganizationPackages returns the OrganizationPackages field if it's non-nil, zero value otherwise. 
+func (i *InstallationPermissions) GetOrganizationPackages() string { + if i == nil || i.OrganizationPackages == nil { + return "" + } + return *i.OrganizationPackages +} + // GetOrganizationPlan returns the OrganizationPlan field if it's non-nil, zero value otherwise. func (i *InstallationPermissions) GetOrganizationPlan() string { if i == nil || i.OrganizationPlan == nil { @@ -6759,9 +7927,9 @@ func (i *InstallationRepositoriesEvent) GetSender() *User { } // GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise. -func (i *InstallationToken) GetExpiresAt() time.Time { +func (i *InstallationToken) GetExpiresAt() Timestamp { if i == nil || i.ExpiresAt == nil { - return time.Time{} + return Timestamp{} } return *i.ExpiresAt } @@ -6815,9 +7983,9 @@ func (i *InteractionRestriction) GetOrigin() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Invitation) GetCreatedAt() time.Time { +func (i *Invitation) GetCreatedAt() Timestamp { if i == nil || i.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.CreatedAt } @@ -6935,9 +8103,9 @@ func (i *Issue) GetBody() string { } // GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetClosedAt() time.Time { +func (i *Issue) GetClosedAt() Timestamp { if i == nil || i.ClosedAt == nil { - return time.Time{} + return Timestamp{} } return *i.ClosedAt } @@ -6967,9 +8135,9 @@ func (i *Issue) GetCommentsURL() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetCreatedAt() time.Time { +func (i *Issue) GetCreatedAt() Timestamp { if i == nil || i.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.CreatedAt } @@ -7078,6 +8246,14 @@ func (i *Issue) GetState() string { return *i.State } +// GetStateReason returns the StateReason field if it's non-nil, zero value otherwise. +func (i *Issue) GetStateReason() string { + if i == nil || i.StateReason == nil { + return "" + } + return *i.StateReason +} + // GetTitle returns the Title field if it's non-nil, zero value otherwise. func (i *Issue) GetTitle() string { if i == nil || i.Title == nil { @@ -7087,9 +8263,9 @@ func (i *Issue) GetTitle() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *Issue) GetUpdatedAt() time.Time { +func (i *Issue) GetUpdatedAt() Timestamp { if i == nil || i.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.UpdatedAt } @@ -7127,9 +8303,9 @@ func (i *IssueComment) GetBody() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetCreatedAt() time.Time { +func (i *IssueComment) GetCreatedAt() Timestamp { if i == nil || i.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.CreatedAt } @@ -7175,9 +8351,9 @@ func (i *IssueComment) GetReactions() *Reactions { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueComment) GetUpdatedAt() time.Time { +func (i *IssueComment) GetUpdatedAt() Timestamp { if i == nil || i.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.UpdatedAt } @@ -7295,9 +8471,9 @@ func (i *IssueEvent) GetCommitID() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (i *IssueEvent) GetCreatedAt() time.Time { +func (i *IssueEvent) GetCreatedAt() Timestamp { if i == nil || i.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.CreatedAt } @@ -7415,17 +8591,17 @@ func (i *IssueImport) GetClosed() bool { } // GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetClosedAt() time.Time { +func (i *IssueImport) GetClosedAt() Timestamp { if i == nil || i.ClosedAt == nil { - return time.Time{} + return Timestamp{} } return *i.ClosedAt } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetCreatedAt() time.Time { +func (i *IssueImport) GetCreatedAt() Timestamp { if i == nil || i.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.CreatedAt } @@ -7439,9 +8615,9 @@ func (i *IssueImport) GetMilestone() int { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImport) GetUpdatedAt() time.Time { +func (i *IssueImport) GetUpdatedAt() Timestamp { if i == nil || i.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.UpdatedAt } @@ -7487,9 +8663,9 @@ func (i *IssueImportError) GetValue() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetCreatedAt() time.Time { +func (i *IssueImportResponse) GetCreatedAt() Timestamp { if i == nil || i.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.CreatedAt } @@ -7543,9 +8719,9 @@ func (i *IssueImportResponse) GetStatus() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (i *IssueImportResponse) GetUpdatedAt() time.Time { +func (i *IssueImportResponse) GetUpdatedAt() Timestamp { if i == nil || i.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *i.UpdatedAt } @@ -7630,6 +8806,14 @@ func (i *IssueRequest) GetState() string { return *i.State } +// GetStateReason returns the StateReason field if it's non-nil, zero value otherwise. +func (i *IssueRequest) GetStateReason() string { + if i == nil || i.StateReason == nil { + return "" + } + return *i.StateReason +} + // GetTitle returns the Title field if it's non-nil, zero value otherwise. func (i *IssueRequest) GetTitle() string { if i == nil || i.Title == nil { @@ -7686,6 +8870,14 @@ func (i *IssuesEvent) GetLabel() *Label { return i.Label } +// GetMilestone returns the Milestone field. +func (i *IssuesEvent) GetMilestone() *Milestone { + if i == nil { + return nil + } + return i.Milestone +} + // GetRepo returns the Repo field. func (i *IssuesEvent) GetRepo() *Repository { if i == nil { @@ -7750,6 +8942,14 @@ func (j *Jobs) GetTotalCount() int { return *j.TotalCount } +// GetAddedBy returns the AddedBy field if it's non-nil, zero value otherwise. +func (k *Key) GetAddedBy() string { + if k == nil || k.AddedBy == nil { + return "" + } + return *k.AddedBy +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (k *Key) GetCreatedAt() Timestamp { if k == nil || k.CreatedAt == nil { @@ -7774,6 +8974,14 @@ func (k *Key) GetKey() string { return *k.Key } +// GetLastUsed returns the LastUsed field if it's non-nil, zero value otherwise. +func (k *Key) GetLastUsed() Timestamp { + if k == nil || k.LastUsed == nil { + return Timestamp{} + } + return *k.LastUsed +} + // GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise. 
func (k *Key) GetReadOnly() bool { if k == nil || k.ReadOnly == nil { @@ -8118,6 +9326,70 @@ func (l *License) GetURL() string { return *l.URL } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (l *LinearHistoryRequirementEnforcementLevelChanges) GetFrom() string { + if l == nil || l.From == nil { + return "" + } + return *l.From +} + +// GetDirection returns the Direction field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetDirection() string { + if l == nil || l.Direction == nil { + return "" + } + return *l.Direction +} + +// GetEcosystem returns the Ecosystem field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetEcosystem() string { + if l == nil || l.Ecosystem == nil { + return "" + } + return *l.Ecosystem +} + +// GetPackage returns the Package field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetPackage() string { + if l == nil || l.Package == nil { + return "" + } + return *l.Package +} + +// GetScope returns the Scope field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetScope() string { + if l == nil || l.Scope == nil { + return "" + } + return *l.Scope +} + +// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetSeverity() string { + if l == nil || l.Severity == nil { + return "" + } + return *l.Severity +} + +// GetSort returns the Sort field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetSort() string { + if l == nil || l.Sort == nil { + return "" + } + return *l.Sort +} + +// GetState returns the State field if it's non-nil, zero value otherwise. +func (l *ListAlertsOptions) GetState() string { + if l == nil || l.State == nil { + return "" + } + return *l.State +} + // GetAppID returns the AppID field if it's non-nil, zero value otherwise. func (l *ListCheckRunsOptions) GetAppID() int64 { if l == nil || l.AppID == nil { @@ -8270,6 +9542,14 @@ func (l *Location) GetStartLine() int { return *l.StartLine } +// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. +func (l *LockBranch) GetEnabled() bool { + if l == nil || l.Enabled == nil { + return false + } + return *l.Enabled +} + // GetEffectiveDate returns the EffectiveDate field if it's non-nil, zero value otherwise. func (m *MarketplacePendingChange) GetEffectiveDate() Timestamp { if m == nil || m.EffectiveDate == nil { @@ -8462,6 +9742,14 @@ func (m *MarketplacePlanAccount) GetURL() string { return *m.URL } +// GetAccount returns the Account field. +func (m *MarketplacePurchase) GetAccount() *MarketplacePurchaseAccount { + if m == nil { + return nil + } + return m.Account +} + // GetBillingCycle returns the BillingCycle field if it's non-nil, zero value otherwise. func (m *MarketplacePurchase) GetBillingCycle() string { if m == nil || m.BillingCycle == nil { @@ -8518,6 +9806,62 @@ func (m *MarketplacePurchase) GetUpdatedAt() Timestamp { return *m.UpdatedAt } +// GetEmail returns the Email field if it's non-nil, zero value otherwise. +func (m *MarketplacePurchaseAccount) GetEmail() string { + if m == nil || m.Email == nil { + return "" + } + return *m.Email +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (m *MarketplacePurchaseAccount) GetID() int64 { + if m == nil || m.ID == nil { + return 0 + } + return *m.ID +} + +// GetLogin returns the Login field if it's non-nil, zero value otherwise. 
+func (m *MarketplacePurchaseAccount) GetLogin() string { + if m == nil || m.Login == nil { + return "" + } + return *m.Login +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (m *MarketplacePurchaseAccount) GetNodeID() string { + if m == nil || m.NodeID == nil { + return "" + } + return *m.NodeID +} + +// GetOrganizationBillingEmail returns the OrganizationBillingEmail field if it's non-nil, zero value otherwise. +func (m *MarketplacePurchaseAccount) GetOrganizationBillingEmail() string { + if m == nil || m.OrganizationBillingEmail == nil { + return "" + } + return *m.OrganizationBillingEmail +} + +// GetType returns the Type field if it's non-nil, zero value otherwise. +func (m *MarketplacePurchaseAccount) GetType() string { + if m == nil || m.Type == nil { + return "" + } + return *m.Type +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (m *MarketplacePurchaseAccount) GetURL() string { + if m == nil || m.URL == nil { + return "" + } + return *m.URL +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (m *MarketplacePurchaseEvent) GetAction() string { if m == nil || m.Action == nil { @@ -8718,6 +10062,94 @@ func (m *MembershipEvent) GetTeam() *Team { return m.Team } +// GetBaseRef returns the BaseRef field if it's non-nil, zero value otherwise. +func (m *MergeGroup) GetBaseRef() string { + if m == nil || m.BaseRef == nil { + return "" + } + return *m.BaseRef +} + +// GetBaseSHA returns the BaseSHA field if it's non-nil, zero value otherwise. +func (m *MergeGroup) GetBaseSHA() string { + if m == nil || m.BaseSHA == nil { + return "" + } + return *m.BaseSHA +} + +// GetHeadCommit returns the HeadCommit field. +func (m *MergeGroup) GetHeadCommit() *Commit { + if m == nil { + return nil + } + return m.HeadCommit +} + +// GetHeadRef returns the HeadRef field if it's non-nil, zero value otherwise. +func (m *MergeGroup) GetHeadRef() string { + if m == nil || m.HeadRef == nil { + return "" + } + return *m.HeadRef +} + +// GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. +func (m *MergeGroup) GetHeadSHA() string { + if m == nil || m.HeadSHA == nil { + return "" + } + return *m.HeadSHA +} + +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (m *MergeGroupEvent) GetAction() string { + if m == nil || m.Action == nil { + return "" + } + return *m.Action +} + +// GetInstallation returns the Installation field. +func (m *MergeGroupEvent) GetInstallation() *Installation { + if m == nil { + return nil + } + return m.Installation +} + +// GetMergeGroup returns the MergeGroup field. +func (m *MergeGroupEvent) GetMergeGroup() *MergeGroup { + if m == nil { + return nil + } + return m.MergeGroup +} + +// GetOrg returns the Org field. +func (m *MergeGroupEvent) GetOrg() *Organization { + if m == nil { + return nil + } + return m.Org +} + +// GetRepo returns the Repo field. +func (m *MergeGroupEvent) GetRepo() *Repository { + if m == nil { + return nil + } + return m.Repo +} + +// GetSender returns the Sender field. +func (m *MergeGroupEvent) GetSender() *User { + if m == nil { + return nil + } + return m.Sender +} + // GetText returns the Text field if it's non-nil, zero value otherwise. func (m *Message) GetText() string { if m == nil || m.Text == nil { @@ -8895,9 +10327,9 @@ func (m *Migration) GetURL() string { } // GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. 
-func (m *Milestone) GetClosedAt() time.Time { +func (m *Milestone) GetClosedAt() Timestamp { if m == nil || m.ClosedAt == nil { - return time.Time{} + return Timestamp{} } return *m.ClosedAt } @@ -8911,9 +10343,9 @@ func (m *Milestone) GetClosedIssues() int { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetCreatedAt() time.Time { +func (m *Milestone) GetCreatedAt() Timestamp { if m == nil || m.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *m.CreatedAt } @@ -8935,9 +10367,9 @@ func (m *Milestone) GetDescription() string { } // GetDueOn returns the DueOn field if it's non-nil, zero value otherwise. -func (m *Milestone) GetDueOn() time.Time { +func (m *Milestone) GetDueOn() Timestamp { if m == nil || m.DueOn == nil { - return time.Time{} + return Timestamp{} } return *m.DueOn } @@ -9007,9 +10439,9 @@ func (m *Milestone) GetTitle() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (m *Milestone) GetUpdatedAt() time.Time { +func (m *Milestone) GetUpdatedAt() Timestamp { if m == nil || m.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *m.UpdatedAt } @@ -9263,9 +10695,9 @@ func (n *Notification) GetID() string { } // GetLastReadAt returns the LastReadAt field if it's non-nil, zero value otherwise. -func (n *Notification) GetLastReadAt() time.Time { +func (n *Notification) GetLastReadAt() Timestamp { if n == nil || n.LastReadAt == nil { - return time.Time{} + return Timestamp{} } return *n.LastReadAt } @@ -9303,9 +10735,9 @@ func (n *Notification) GetUnread() bool { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (n *Notification) GetUpdatedAt() time.Time { +func (n *Notification) GetUpdatedAt() Timestamp { if n == nil || n.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *n.UpdatedAt } @@ -9363,15 +10795,31 @@ func (o *OAuthAPP) GetName() string { if o == nil || o.Name == nil { return "" } - return *o.Name + return *o.Name +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (o *OAuthAPP) GetURL() string { + if o == nil || o.URL == nil { + return "" + } + return *o.URL +} + +// GetUseDefault returns the UseDefault field if it's non-nil, zero value otherwise. +func (o *OIDCSubjectClaimCustomTemplate) GetUseDefault() bool { + if o == nil || o.UseDefault == nil { + return false + } + return *o.UseDefault } -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (o *OAuthAPP) GetURL() string { - if o == nil || o.URL == nil { - return "" +// GetAdvancedSecurityEnabledForNewRepos returns the AdvancedSecurityEnabledForNewRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetAdvancedSecurityEnabledForNewRepos() bool { + if o == nil || o.AdvancedSecurityEnabledForNewRepos == nil { + return false } - return *o.URL + return *o.AdvancedSecurityEnabledForNewRepos } // GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise. @@ -9415,9 +10863,9 @@ func (o *Organization) GetCompany() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (o *Organization) GetCreatedAt() time.Time { +func (o *Organization) GetCreatedAt() Timestamp { if o == nil || o.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *o.CreatedAt } @@ -9438,6 +10886,30 @@ func (o *Organization) GetDefaultRepoSettings() string { return *o.DefaultRepoSettings } +// GetDependabotAlertsEnabledForNewRepos returns the DependabotAlertsEnabledForNewRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetDependabotAlertsEnabledForNewRepos() bool { + if o == nil || o.DependabotAlertsEnabledForNewRepos == nil { + return false + } + return *o.DependabotAlertsEnabledForNewRepos +} + +// GetDependabotSecurityUpdatesEnabledForNewRepos returns the DependabotSecurityUpdatesEnabledForNewRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetDependabotSecurityUpdatesEnabledForNewRepos() bool { + if o == nil || o.DependabotSecurityUpdatesEnabledForNewRepos == nil { + return false + } + return *o.DependabotSecurityUpdatesEnabledForNewRepos +} + +// GetDependencyGraphEnabledForNewRepos returns the DependencyGraphEnabledForNewRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetDependencyGraphEnabledForNewRepos() bool { + if o == nil || o.DependencyGraphEnabledForNewRepos == nil { + return false + } + return *o.DependencyGraphEnabledForNewRepos +} + // GetDescription returns the Description field if it's non-nil, zero value otherwise. func (o *Organization) GetDescription() string { if o == nil || o.Description == nil { @@ -9655,7 +11127,7 @@ func (o *Organization) GetNodeID() string { } // GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetOwnedPrivateRepos() int { +func (o *Organization) GetOwnedPrivateRepos() int64 { if o == nil || o.OwnedPrivateRepos == nil { return 0 } @@ -9710,8 +11182,24 @@ func (o *Organization) GetReposURL() string { return *o.ReposURL } +// GetSecretScanningEnabledForNewRepos returns the SecretScanningEnabledForNewRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetSecretScanningEnabledForNewRepos() bool { + if o == nil || o.SecretScanningEnabledForNewRepos == nil { + return false + } + return *o.SecretScanningEnabledForNewRepos +} + +// GetSecretScanningPushProtectionEnabledForNewRepos returns the SecretScanningPushProtectionEnabledForNewRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetSecretScanningPushProtectionEnabledForNewRepos() bool { + if o == nil || o.SecretScanningPushProtectionEnabledForNewRepos == nil { + return false + } + return *o.SecretScanningPushProtectionEnabledForNewRepos +} + // GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise. -func (o *Organization) GetTotalPrivateRepos() int { +func (o *Organization) GetTotalPrivateRepos() int64 { if o == nil || o.TotalPrivateRepos == nil { return 0 } @@ -9743,9 +11231,9 @@ func (o *Organization) GetType() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (o *Organization) GetUpdatedAt() time.Time { +func (o *Organization) GetUpdatedAt() Timestamp { if o == nil || o.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *o.UpdatedAt } @@ -9758,6 +11246,14 @@ func (o *Organization) GetURL() string { return *o.URL } +// GetWebCommitSignoffRequired returns the WebCommitSignoffRequired field if it's non-nil, zero value otherwise. 
+func (o *Organization) GetWebCommitSignoffRequired() bool { + if o == nil || o.WebCommitSignoffRequired == nil { + return false + } + return *o.WebCommitSignoffRequired +} + // GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. func (o *OrganizationCustomRepoRoles) GetTotalCount() int { if o == nil || o.TotalCount == nil { @@ -9862,6 +11358,94 @@ func (o *OrgBlockEvent) GetSender() *User { return o.Sender } +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetCreatedAt() Timestamp { + if o == nil || o.CreatedAt == nil { + return Timestamp{} + } + return *o.CreatedAt +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetID() int64 { + if o == nil || o.ID == nil { + return 0 + } + return *o.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetName() string { + if o == nil || o.Name == nil { + return "" + } + return *o.Name +} + +// GetPath returns the Path field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetPath() string { + if o == nil || o.Path == nil { + return "" + } + return *o.Path +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetRef() string { + if o == nil || o.Ref == nil { + return "" + } + return *o.Ref +} + +// GetRepository returns the Repository field. +func (o *OrgRequiredWorkflow) GetRepository() *Repository { + if o == nil { + return nil + } + return o.Repository +} + +// GetScope returns the Scope field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetScope() string { + if o == nil || o.Scope == nil { + return "" + } + return *o.Scope +} + +// GetSelectedRepositoriesURL returns the SelectedRepositoriesURL field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetSelectedRepositoriesURL() string { + if o == nil || o.SelectedRepositoriesURL == nil { + return "" + } + return *o.SelectedRepositoriesURL +} + +// GetState returns the State field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetState() string { + if o == nil || o.State == nil { + return "" + } + return *o.State +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflow) GetUpdatedAt() Timestamp { + if o == nil || o.UpdatedAt == nil { + return Timestamp{} + } + return *o.UpdatedAt +} + +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (o *OrgRequiredWorkflows) GetTotalCount() int { + if o == nil || o.TotalCount == nil { + return 0 + } + return *o.TotalCount +} + // GetDisabledOrgs returns the DisabledOrgs field if it's non-nil, zero value otherwise. func (o *OrgStats) GetDisabledOrgs() int { if o == nil || o.DisabledOrgs == nil { @@ -9894,6 +11478,22 @@ func (o *OrgStats) GetTotalTeams() int { return *o.TotalTeams } +// GetOrg returns the Org field. +func (o *OwnerInfo) GetOrg() *User { + if o == nil { + return nil + } + return o.Org +} + +// GetUser returns the User field. +func (o *OwnerInfo) GetUser() *User { + if o == nil { + return nil + } + return o.User +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
func (p *Package) GetCreatedAt() Timestamp { if p == nil || p.CreatedAt == nil { @@ -10566,6 +12166,14 @@ func (p *PageBuildEvent) GetSender() *User { return p.Sender } +// GetBuildType returns the BuildType field if it's non-nil, zero value otherwise. +func (p *Pages) GetBuildType() string { + if p == nil || p.BuildType == nil { + return "" + } + return *p.BuildType +} + // GetCNAME returns the CNAME field if it's non-nil, zero value otherwise. func (p *Pages) GetCNAME() string { if p == nil || p.CNAME == nil { @@ -10702,6 +12310,230 @@ func (p *PagesBuild) GetURL() string { return *p.URL } +// GetCAAError returns the CAAError field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetCAAError() string { + if p == nil || p.CAAError == nil { + return "" + } + return *p.CAAError +} + +// GetDNSResolves returns the DNSResolves field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetDNSResolves() bool { + if p == nil || p.DNSResolves == nil { + return false + } + return *p.DNSResolves +} + +// GetEnforcesHTTPS returns the EnforcesHTTPS field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetEnforcesHTTPS() bool { + if p == nil || p.EnforcesHTTPS == nil { + return false + } + return *p.EnforcesHTTPS +} + +// GetHasCNAMERecord returns the HasCNAMERecord field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetHasCNAMERecord() bool { + if p == nil || p.HasCNAMERecord == nil { + return false + } + return *p.HasCNAMERecord +} + +// GetHasMXRecordsPresent returns the HasMXRecordsPresent field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetHasMXRecordsPresent() bool { + if p == nil || p.HasMXRecordsPresent == nil { + return false + } + return *p.HasMXRecordsPresent +} + +// GetHost returns the Host field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetHost() string { + if p == nil || p.Host == nil { + return "" + } + return *p.Host +} + +// GetHTTPSError returns the HTTPSError field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetHTTPSError() string { + if p == nil || p.HTTPSError == nil { + return "" + } + return *p.HTTPSError +} + +// GetIsApexDomain returns the IsApexDomain field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsApexDomain() bool { + if p == nil || p.IsApexDomain == nil { + return false + } + return *p.IsApexDomain +} + +// GetIsARecord returns the IsARecord field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsARecord() bool { + if p == nil || p.IsARecord == nil { + return false + } + return *p.IsARecord +} + +// GetIsCloudflareIP returns the IsCloudflareIP field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsCloudflareIP() bool { + if p == nil || p.IsCloudflareIP == nil { + return false + } + return *p.IsCloudflareIP +} + +// GetIsCNAMEToFastly returns the IsCNAMEToFastly field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsCNAMEToFastly() bool { + if p == nil || p.IsCNAMEToFastly == nil { + return false + } + return *p.IsCNAMEToFastly +} + +// GetIsCNAMEToGithubUserDomain returns the IsCNAMEToGithubUserDomain field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsCNAMEToGithubUserDomain() bool { + if p == nil || p.IsCNAMEToGithubUserDomain == nil { + return false + } + return *p.IsCNAMEToGithubUserDomain +} + +// GetIsCNAMEToPagesDotGithubDotCom returns the IsCNAMEToPagesDotGithubDotCom field if it's non-nil, zero value otherwise. 
+func (p *PagesDomain) GetIsCNAMEToPagesDotGithubDotCom() bool { + if p == nil || p.IsCNAMEToPagesDotGithubDotCom == nil { + return false + } + return *p.IsCNAMEToPagesDotGithubDotCom +} + +// GetIsFastlyIP returns the IsFastlyIP field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsFastlyIP() bool { + if p == nil || p.IsFastlyIP == nil { + return false + } + return *p.IsFastlyIP +} + +// GetIsHTTPSEligible returns the IsHTTPSEligible field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsHTTPSEligible() bool { + if p == nil || p.IsHTTPSEligible == nil { + return false + } + return *p.IsHTTPSEligible +} + +// GetIsNonGithubPagesIPPresent returns the IsNonGithubPagesIPPresent field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsNonGithubPagesIPPresent() bool { + if p == nil || p.IsNonGithubPagesIPPresent == nil { + return false + } + return *p.IsNonGithubPagesIPPresent +} + +// GetIsOldIPAddress returns the IsOldIPAddress field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsOldIPAddress() bool { + if p == nil || p.IsOldIPAddress == nil { + return false + } + return *p.IsOldIPAddress +} + +// GetIsPagesDomain returns the IsPagesDomain field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsPagesDomain() bool { + if p == nil || p.IsPagesDomain == nil { + return false + } + return *p.IsPagesDomain +} + +// GetIsPointedToGithubPagesIP returns the IsPointedToGithubPagesIP field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsPointedToGithubPagesIP() bool { + if p == nil || p.IsPointedToGithubPagesIP == nil { + return false + } + return *p.IsPointedToGithubPagesIP +} + +// GetIsProxied returns the IsProxied field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsProxied() bool { + if p == nil || p.IsProxied == nil { + return false + } + return *p.IsProxied +} + +// GetIsServedByPages returns the IsServedByPages field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsServedByPages() bool { + if p == nil || p.IsServedByPages == nil { + return false + } + return *p.IsServedByPages +} + +// GetIsValid returns the IsValid field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsValid() bool { + if p == nil || p.IsValid == nil { + return false + } + return *p.IsValid +} + +// GetIsValidDomain returns the IsValidDomain field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetIsValidDomain() bool { + if p == nil || p.IsValidDomain == nil { + return false + } + return *p.IsValidDomain +} + +// GetNameservers returns the Nameservers field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetNameservers() string { + if p == nil || p.Nameservers == nil { + return "" + } + return *p.Nameservers +} + +// GetReason returns the Reason field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetReason() string { + if p == nil || p.Reason == nil { + return "" + } + return *p.Reason +} + +// GetRespondsToHTTPS returns the RespondsToHTTPS field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetRespondsToHTTPS() bool { + if p == nil || p.RespondsToHTTPS == nil { + return false + } + return *p.RespondsToHTTPS +} + +// GetShouldBeARecord returns the ShouldBeARecord field if it's non-nil, zero value otherwise. 
+func (p *PagesDomain) GetShouldBeARecord() bool { + if p == nil || p.ShouldBeARecord == nil { + return false + } + return *p.ShouldBeARecord +} + +// GetURI returns the URI field if it's non-nil, zero value otherwise. +func (p *PagesDomain) GetURI() string { + if p == nil || p.URI == nil { + return "" + } + return *p.URI +} + // GetMessage returns the Message field if it's non-nil, zero value otherwise. func (p *PagesError) GetMessage() string { if p == nil || p.Message == nil { @@ -10710,6 +12542,22 @@ func (p *PagesError) GetMessage() string { return *p.Message } +// GetAltDomain returns the AltDomain field. +func (p *PagesHealthCheckResponse) GetAltDomain() *PagesDomain { + if p == nil { + return nil + } + return p.AltDomain +} + +// GetDomain returns the Domain field. +func (p *PagesHealthCheckResponse) GetDomain() *PagesDomain { + if p == nil { + return nil + } + return p.Domain +} + // GetDescription returns the Description field if it's non-nil, zero value otherwise. func (p *PagesHTTPSCertificate) GetDescription() string { if p == nil || p.Description == nil { @@ -10758,6 +12606,14 @@ func (p *PageStats) GetTotalPages() int { return *p.TotalPages } +// GetBuildType returns the BuildType field if it's non-nil, zero value otherwise. +func (p *PagesUpdate) GetBuildType() string { + if p == nil || p.BuildType == nil { + return "" + } + return *p.BuildType +} + // GetCNAME returns the CNAME field if it's non-nil, zero value otherwise. func (p *PagesUpdate) GetCNAME() string { if p == nil || p.CNAME == nil { @@ -10782,12 +12638,12 @@ func (p *PagesUpdate) GetPublic() bool { return *p.Public } -// GetSource returns the Source field if it's non-nil, zero value otherwise. -func (p *PagesUpdate) GetSource() string { - if p == nil || p.Source == nil { - return "" +// GetSource returns the Source field. +func (p *PagesUpdate) GetSource() *PagesSource { + if p == nil { + return nil } - return *p.Source + return p.Source } // GetHook returns the Hook field. @@ -10871,7 +12727,7 @@ func (p *Plan) GetName() string { } // GetPrivateRepos returns the PrivateRepos field if it's non-nil, zero value otherwise. -func (p *Plan) GetPrivateRepos() int { +func (p *Plan) GetPrivateRepos() int64 { if p == nil || p.PrivateRepos == nil { return 0 } @@ -10891,7 +12747,23 @@ func (p *Plan) GetSpace() int { if p == nil || p.Space == nil { return 0 } - return *p.Space + return *p.Space +} + +// GetCode returns the Code field if it's non-nil, zero value otherwise. +func (p *PolicyOverrideReason) GetCode() string { + if p == nil || p.Code == nil { + return "" + } + return *p.Code +} + +// GetMessage returns the Message field if it's non-nil, zero value otherwise. +func (p *PolicyOverrideReason) GetMessage() string { + if p == nil || p.Message == nil { + return "" + } + return *p.Message } // GetConfigURL returns the ConfigURL field if it's non-nil, zero value otherwise. @@ -11646,6 +13518,22 @@ func (p *Protection) GetAllowForcePushes() *AllowForcePushes { return p.AllowForcePushes } +// GetAllowForkSyncing returns the AllowForkSyncing field. +func (p *Protection) GetAllowForkSyncing() *AllowForkSyncing { + if p == nil { + return nil + } + return p.AllowForkSyncing +} + +// GetBlockCreations returns the BlockCreations field. +func (p *Protection) GetBlockCreations() *BlockCreations { + if p == nil { + return nil + } + return p.BlockCreations +} + // GetEnforceAdmins returns the EnforceAdmins field. 
func (p *Protection) GetEnforceAdmins() *AdminEnforcement { if p == nil { @@ -11654,6 +13542,14 @@ func (p *Protection) GetEnforceAdmins() *AdminEnforcement { return p.EnforceAdmins } +// GetLockBranch returns the LockBranch field. +func (p *Protection) GetLockBranch() *LockBranch { + if p == nil { + return nil + } + return p.LockBranch +} + // GetRequiredConversationResolution returns the RequiredConversationResolution field. func (p *Protection) GetRequiredConversationResolution() *RequiredConversationResolution { if p == nil { @@ -11670,6 +13566,14 @@ func (p *Protection) GetRequiredPullRequestReviews() *PullRequestReviewsEnforcem return p.RequiredPullRequestReviews } +// GetRequiredSignatures returns the RequiredSignatures field. +func (p *Protection) GetRequiredSignatures() *SignaturesProtectedBranch { + if p == nil { + return nil + } + return p.RequiredSignatures +} + // GetRequiredStatusChecks returns the RequiredStatusChecks field. func (p *Protection) GetRequiredStatusChecks() *RequiredStatusChecks { if p == nil { @@ -11694,6 +13598,30 @@ func (p *Protection) GetRestrictions() *BranchRestrictions { return p.Restrictions } +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (p *Protection) GetURL() string { + if p == nil || p.URL == nil { + return "" + } + return *p.URL +} + +// GetAdminEnforced returns the AdminEnforced field. +func (p *ProtectionChanges) GetAdminEnforced() *AdminEnforcedChanges { + if p == nil { + return nil + } + return p.AdminEnforced +} + +// GetAllowDeletionsEnforcementLevel returns the AllowDeletionsEnforcementLevel field. +func (p *ProtectionChanges) GetAllowDeletionsEnforcementLevel() *AllowDeletionsEnforcementLevelChanges { + if p == nil { + return nil + } + return p.AllowDeletionsEnforcementLevel +} + // GetAuthorizedActorNames returns the AuthorizedActorNames field. func (p *ProtectionChanges) GetAuthorizedActorNames() *AuthorizedActorNames { if p == nil { @@ -11710,6 +13638,94 @@ func (p *ProtectionChanges) GetAuthorizedActorsOnly() *AuthorizedActorsOnly { return p.AuthorizedActorsOnly } +// GetAuthorizedDismissalActorsOnly returns the AuthorizedDismissalActorsOnly field. +func (p *ProtectionChanges) GetAuthorizedDismissalActorsOnly() *AuthorizedDismissalActorsOnlyChanges { + if p == nil { + return nil + } + return p.AuthorizedDismissalActorsOnly +} + +// GetCreateProtected returns the CreateProtected field. +func (p *ProtectionChanges) GetCreateProtected() *CreateProtectedChanges { + if p == nil { + return nil + } + return p.CreateProtected +} + +// GetDismissStaleReviewsOnPush returns the DismissStaleReviewsOnPush field. +func (p *ProtectionChanges) GetDismissStaleReviewsOnPush() *DismissStaleReviewsOnPushChanges { + if p == nil { + return nil + } + return p.DismissStaleReviewsOnPush +} + +// GetLinearHistoryRequirementEnforcementLevel returns the LinearHistoryRequirementEnforcementLevel field. +func (p *ProtectionChanges) GetLinearHistoryRequirementEnforcementLevel() *LinearHistoryRequirementEnforcementLevelChanges { + if p == nil { + return nil + } + return p.LinearHistoryRequirementEnforcementLevel +} + +// GetPullRequestReviewsEnforcementLevel returns the PullRequestReviewsEnforcementLevel field. +func (p *ProtectionChanges) GetPullRequestReviewsEnforcementLevel() *PullRequestReviewsEnforcementLevelChanges { + if p == nil { + return nil + } + return p.PullRequestReviewsEnforcementLevel +} + +// GetRequireCodeOwnerReview returns the RequireCodeOwnerReview field. 
+func (p *ProtectionChanges) GetRequireCodeOwnerReview() *RequireCodeOwnerReviewChanges { + if p == nil { + return nil + } + return p.RequireCodeOwnerReview +} + +// GetRequiredConversationResolutionLevel returns the RequiredConversationResolutionLevel field. +func (p *ProtectionChanges) GetRequiredConversationResolutionLevel() *RequiredConversationResolutionLevelChanges { + if p == nil { + return nil + } + return p.RequiredConversationResolutionLevel +} + +// GetRequiredDeploymentsEnforcementLevel returns the RequiredDeploymentsEnforcementLevel field. +func (p *ProtectionChanges) GetRequiredDeploymentsEnforcementLevel() *RequiredDeploymentsEnforcementLevelChanges { + if p == nil { + return nil + } + return p.RequiredDeploymentsEnforcementLevel +} + +// GetRequiredStatusChecks returns the RequiredStatusChecks field. +func (p *ProtectionChanges) GetRequiredStatusChecks() *RequiredStatusChecksChanges { + if p == nil { + return nil + } + return p.RequiredStatusChecks +} + +// GetRequiredStatusChecksEnforcementLevel returns the RequiredStatusChecksEnforcementLevel field. +func (p *ProtectionChanges) GetRequiredStatusChecksEnforcementLevel() *RequiredStatusChecksEnforcementLevelChanges { + if p == nil { + return nil + } + return p.RequiredStatusChecksEnforcementLevel +} + +// GetSignatureRequirementEnforcementLevel returns the SignatureRequirementEnforcementLevel field. +func (p *ProtectionChanges) GetSignatureRequirementEnforcementLevel() *SignatureRequirementEnforcementLevelChanges { + if p == nil { + return nil + } + return p.SignatureRequirementEnforcementLevel +} + // GetAllowDeletions returns the AllowDeletions field if it's non-nil, zero value otherwise. func (p *ProtectionRequest) GetAllowDeletions() bool { if p == nil || p.AllowDeletions == nil { @@ -11726,6 +13742,30 @@ func (p *ProtectionRequest) GetAllowForcePushes() bool { return *p.AllowForcePushes } +// GetAllowForkSyncing returns the AllowForkSyncing field if it's non-nil, zero value otherwise. +func (p *ProtectionRequest) GetAllowForkSyncing() bool { + if p == nil || p.AllowForkSyncing == nil { + return false + } + return *p.AllowForkSyncing +} + +// GetBlockCreations returns the BlockCreations field if it's non-nil, zero value otherwise. +func (p *ProtectionRequest) GetBlockCreations() bool { + if p == nil || p.BlockCreations == nil { + return false + } + return *p.BlockCreations +} + +// GetLockBranch returns the LockBranch field if it's non-nil, zero value otherwise. +func (p *ProtectionRequest) GetLockBranch() bool { + if p == nil || p.LockBranch == nil { + return false + } + return *p.LockBranch +} + // GetRequiredConversationResolution returns the RequiredConversationResolution field if it's non-nil, zero value otherwise. func (p *ProtectionRequest) GetRequiredConversationResolution() bool { if p == nil || p.RequiredConversationResolution == nil { @@ -11903,9 +13943,9 @@ func (p *PullRequest) GetChangedFiles() int { } // GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetClosedAt() time.Time { +func (p *PullRequest) GetClosedAt() Timestamp { if p == nil || p.ClosedAt == nil { - return time.Time{} + return Timestamp{} } return *p.ClosedAt } @@ -11943,9 +13983,9 @@ func (p *PullRequest) GetCommitsURL() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
-func (p *PullRequest) GetCreatedAt() time.Time { +func (p *PullRequest) GetCreatedAt() Timestamp { if p == nil || p.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *p.CreatedAt } @@ -12063,9 +14103,9 @@ func (p *PullRequest) GetMerged() bool { } // GetMergedAt returns the MergedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetMergedAt() time.Time { +func (p *PullRequest) GetMergedAt() Timestamp { if p == nil || p.MergedAt == nil { - return time.Time{} + return Timestamp{} } return *p.MergedAt } @@ -12167,9 +14207,9 @@ func (p *PullRequest) GetTitle() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequest) GetUpdatedAt() time.Time { +func (p *PullRequest) GetUpdatedAt() Timestamp { if p == nil || p.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *p.UpdatedAt } @@ -12311,9 +14351,9 @@ func (p *PullRequestComment) GetCommitID() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetCreatedAt() time.Time { +func (p *PullRequestComment) GetCreatedAt() Timestamp { if p == nil || p.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *p.CreatedAt } @@ -12463,9 +14503,9 @@ func (p *PullRequestComment) GetStartSide() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestComment) GetUpdatedAt() time.Time { +func (p *PullRequestComment) GetUpdatedAt() Timestamp { if p == nil || p.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *p.UpdatedAt } @@ -12719,9 +14759,9 @@ func (p *PullRequestReview) GetState() string { } // GetSubmittedAt returns the SubmittedAt field if it's non-nil, zero value otherwise. -func (p *PullRequestReview) GetSubmittedAt() time.Time { +func (p *PullRequestReview) GetSubmittedAt() Timestamp { if p == nil || p.SubmittedAt == nil { - return time.Time{} + return Timestamp{} } return *p.SubmittedAt } @@ -12886,6 +14926,14 @@ func (p *PullRequestReviewRequest) GetNodeID() string { return *p.NodeID } +// GetBypassPullRequestAllowances returns the BypassPullRequestAllowances field. +func (p *PullRequestReviewsEnforcement) GetBypassPullRequestAllowances() *BypassPullRequestAllowances { + if p == nil { + return nil + } + return p.BypassPullRequestAllowances +} + // GetDismissalRestrictions returns the DismissalRestrictions field. func (p *PullRequestReviewsEnforcement) GetDismissalRestrictions() *DismissalRestrictions { if p == nil { @@ -12894,6 +14942,22 @@ func (p *PullRequestReviewsEnforcement) GetDismissalRestrictions() *DismissalRes return p.DismissalRestrictions } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (p *PullRequestReviewsEnforcementLevelChanges) GetFrom() string { + if p == nil || p.From == nil { + return "" + } + return *p.From +} + +// GetBypassPullRequestAllowancesRequest returns the BypassPullRequestAllowancesRequest field. +func (p *PullRequestReviewsEnforcementRequest) GetBypassPullRequestAllowancesRequest() *BypassPullRequestAllowancesRequest { + if p == nil { + return nil + } + return p.BypassPullRequestAllowancesRequest +} + // GetDismissalRestrictionsRequest returns the DismissalRestrictionsRequest field. 
func (p *PullRequestReviewsEnforcementRequest) GetDismissalRestrictionsRequest() *DismissalRestrictionsRequest { if p == nil { @@ -12902,6 +14966,22 @@ func (p *PullRequestReviewsEnforcementRequest) GetDismissalRestrictionsRequest() return p.DismissalRestrictionsRequest } +// GetRequireLastPushApproval returns the RequireLastPushApproval field if it's non-nil, zero value otherwise. +func (p *PullRequestReviewsEnforcementRequest) GetRequireLastPushApproval() bool { + if p == nil || p.RequireLastPushApproval == nil { + return false + } + return *p.RequireLastPushApproval +} + +// GetBypassPullRequestAllowancesRequest returns the BypassPullRequestAllowancesRequest field. +func (p *PullRequestReviewsEnforcementUpdate) GetBypassPullRequestAllowancesRequest() *BypassPullRequestAllowancesRequest { + if p == nil { + return nil + } + return p.BypassPullRequestAllowancesRequest +} + // GetDismissalRestrictionsRequest returns the DismissalRestrictionsRequest field. func (p *PullRequestReviewsEnforcementUpdate) GetDismissalRestrictionsRequest() *DismissalRestrictionsRequest { if p == nil { @@ -12926,6 +15006,14 @@ func (p *PullRequestReviewsEnforcementUpdate) GetRequireCodeOwnerReviews() bool return *p.RequireCodeOwnerReviews } +// GetRequireLastPushApproval returns the RequireLastPushApproval field if it's non-nil, zero value otherwise. +func (p *PullRequestReviewsEnforcementUpdate) GetRequireLastPushApproval() bool { + if p == nil || p.RequireLastPushApproval == nil { + return false + } + return *p.RequireLastPushApproval +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (p *PullRequestReviewThreadEvent) GetAction() string { if p == nil || p.Action == nil { @@ -13190,6 +15278,14 @@ func (p *PushEvent) GetBefore() string { return *p.Before } +// GetCommits returns the Commits slice if it's non-nil, nil otherwise. +func (p *PushEvent) GetCommits() []*HeadCommit { + if p == nil || p.Commits == nil { + return nil + } + return p.Commits +} + // GetCompare returns the Compare field if it's non-nil, zero value otherwise. func (p *PushEvent) GetCompare() string { if p == nil || p.Compare == nil { @@ -14070,6 +16166,102 @@ func (r *RepoName) GetFrom() string { return *r.From } +// GetBadgeURL returns the BadgeURL field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetBadgeURL() string { + if r == nil || r.BadgeURL == nil { + return "" + } + return *r.BadgeURL +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetCreatedAt() Timestamp { + if r == nil || r.CreatedAt == nil { + return Timestamp{} + } + return *r.CreatedAt +} + +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetHTMLURL() string { + if r == nil || r.HTMLURL == nil { + return "" + } + return *r.HTMLURL +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetID() int64 { + if r == nil || r.ID == nil { + return 0 + } + return *r.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetName() string { + if r == nil || r.Name == nil { + return "" + } + return *r.Name +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetNodeID() string { + if r == nil || r.NodeID == nil { + return "" + } + return *r.NodeID +} + +// GetPath returns the Path field if it's non-nil, zero value otherwise. 
+func (r *RepoRequiredWorkflow) GetPath() string { + if r == nil || r.Path == nil { + return "" + } + return *r.Path +} + +// GetSourceRepository returns the SourceRepository field. +func (r *RepoRequiredWorkflow) GetSourceRepository() *Repository { + if r == nil { + return nil + } + return r.SourceRepository +} + +// GetState returns the State field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetState() string { + if r == nil || r.State == nil { + return "" + } + return *r.State +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetUpdatedAt() Timestamp { + if r == nil || r.UpdatedAt == nil { + return Timestamp{} + } + return *r.UpdatedAt +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflow) GetURL() string { + if r == nil || r.URL == nil { + return "" + } + return *r.URL +} + +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (r *RepoRequiredWorkflows) GetTotalCount() int { + if r == nil || r.TotalCount == nil { + return 0 + } + return *r.TotalCount +} + // GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. func (r *RepositoriesSearchResult) GetIncompleteResults() bool { if r == nil || r.IncompleteResults == nil { @@ -14382,6 +16574,14 @@ func (r *Repository) GetGitURL() string { return *r.GitURL } +// GetHasDiscussions returns the HasDiscussions field if it's non-nil, zero value otherwise. +func (r *Repository) GetHasDiscussions() bool { + if r == nil || r.HasDiscussions == nil { + return false + } + return *r.HasDiscussions +} + // GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise. func (r *Repository) GetHasDownloads() bool { if r == nil || r.HasDownloads == nil { @@ -14539,7 +16739,23 @@ func (r *Repository) GetMasterBranch() string { if r == nil || r.MasterBranch == nil { return "" } - return *r.MasterBranch + return *r.MasterBranch +} + +// GetMergeCommitMessage returns the MergeCommitMessage field if it's non-nil, zero value otherwise. +func (r *Repository) GetMergeCommitMessage() string { + if r == nil || r.MergeCommitMessage == nil { + return "" + } + return *r.MergeCommitMessage +} + +// GetMergeCommitTitle returns the MergeCommitTitle field if it's non-nil, zero value otherwise. +func (r *Repository) GetMergeCommitTitle() string { + if r == nil || r.MergeCommitTitle == nil { + return "" + } + return *r.MergeCommitTitle } // GetMergesURL returns the MergesURL field if it's non-nil, zero value otherwise. @@ -14710,6 +16926,22 @@ func (r *Repository) GetSource() *Repository { return r.Source } +// GetSquashMergeCommitMessage returns the SquashMergeCommitMessage field if it's non-nil, zero value otherwise. +func (r *Repository) GetSquashMergeCommitMessage() string { + if r == nil || r.SquashMergeCommitMessage == nil { + return "" + } + return *r.SquashMergeCommitMessage +} + +// GetSquashMergeCommitTitle returns the SquashMergeCommitTitle field if it's non-nil, zero value otherwise. +func (r *Repository) GetSquashMergeCommitTitle() string { + if r == nil || r.SquashMergeCommitTitle == nil { + return "" + } + return *r.SquashMergeCommitTitle +} + // GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise. 
func (r *Repository) GetSSHURL() string { if r == nil || r.SSHURL == nil { @@ -14862,6 +17094,22 @@ func (r *Repository) GetWatchersCount() int { return *r.WatchersCount } +// GetWebCommitSignoffRequired returns the WebCommitSignoffRequired field if it's non-nil, zero value otherwise. +func (r *Repository) GetWebCommitSignoffRequired() bool { + if r == nil || r.WebCommitSignoffRequired == nil { + return false + } + return *r.WebCommitSignoffRequired +} + +// GetAccessLevel returns the AccessLevel field if it's non-nil, zero value otherwise. +func (r *RepositoryActionsAccessLevel) GetAccessLevel() string { + if r == nil || r.AccessLevel == nil { + return "" + } + return *r.AccessLevel +} + // GetAdvancedSecurityCommitters returns the AdvancedSecurityCommitters field if it's non-nil, zero value otherwise. func (r *RepositoryActiveCommitters) GetAdvancedSecurityCommitters() int { if r == nil || r.AdvancedSecurityCommitters == nil { @@ -14895,9 +17143,9 @@ func (r *RepositoryComment) GetCommitID() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetCreatedAt() time.Time { +func (r *RepositoryComment) GetCreatedAt() Timestamp { if r == nil || r.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *r.CreatedAt } @@ -14951,9 +17199,9 @@ func (r *RepositoryComment) GetReactions() *Reactions { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *RepositoryComment) GetUpdatedAt() time.Time { +func (r *RepositoryComment) GetUpdatedAt() Timestamp { if r == nil || r.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *r.UpdatedAt } @@ -15582,6 +17830,14 @@ func (r *RepositoryRelease) GetID() int64 { return *r.ID } +// GetMakeLatest returns the MakeLatest field if it's non-nil, zero value otherwise. +func (r *RepositoryRelease) GetMakeLatest() string { + if r == nil || r.MakeLatest == nil { + return "" + } + return *r.MakeLatest +} + // GetName returns the Name field if it's non-nil, zero value otherwise. func (r *RepositoryRelease) GetName() string { if r == nil || r.Name == nil { @@ -15895,9 +18151,9 @@ func (r *RepoStatus) GetContext() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetCreatedAt() time.Time { +func (r *RepoStatus) GetCreatedAt() Timestamp { if r == nil || r.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *r.CreatedAt } @@ -15951,9 +18207,9 @@ func (r *RepoStatus) GetTargetURL() string { } // GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. -func (r *RepoStatus) GetUpdatedAt() time.Time { +func (r *RepoStatus) GetUpdatedAt() Timestamp { if r == nil || r.UpdatedAt == nil { - return time.Time{} + return Timestamp{} } return *r.UpdatedAt } @@ -15966,6 +18222,30 @@ func (r *RepoStatus) GetURL() string { return *r.URL } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (r *RequireCodeOwnerReviewChanges) GetFrom() bool { + if r == nil || r.From == nil { + return false + } + return *r.From +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (r *RequiredConversationResolutionLevelChanges) GetFrom() string { + if r == nil || r.From == nil { + return "" + } + return *r.From +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. 
+func (r *RequiredDeploymentsEnforcementLevelChanges) GetFrom() string { + if r == nil || r.From == nil { + return "" + } + return *r.From +} + // GetType returns the Type field if it's non-nil, zero value otherwise. func (r *RequiredReviewer) GetType() string { if r == nil || r.Type == nil { @@ -15982,6 +18262,30 @@ func (r *RequiredStatusCheck) GetAppID() int64 { return *r.AppID } +// GetContextsURL returns the ContextsURL field if it's non-nil, zero value otherwise. +func (r *RequiredStatusChecks) GetContextsURL() string { + if r == nil || r.ContextsURL == nil { + return "" + } + return *r.ContextsURL +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (r *RequiredStatusChecks) GetURL() string { + if r == nil || r.URL == nil { + return "" + } + return *r.URL +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (r *RequiredStatusChecksEnforcementLevelChanges) GetFrom() string { + if r == nil || r.From == nil { + return "" + } + return *r.From +} + // GetStrict returns the Strict field if it's non-nil, zero value otherwise. func (r *RequiredStatusChecksRequest) GetStrict() bool { if r == nil || r.Strict == nil { @@ -15990,6 +18294,14 @@ func (r *RequiredStatusChecksRequest) GetStrict() bool { return *r.Strict } +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (r *RequiredWorkflowSelectedRepos) GetTotalCount() int { + if r == nil || r.TotalCount == nil { + return 0 + } + return *r.TotalCount +} + // GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. func (r *ReviewersRequest) GetNodeID() string { if r == nil || r.NodeID == nil { @@ -16182,6 +18494,14 @@ func (r *RunnerGroup) GetName() string { return *r.Name } +// GetRestrictedToWorkflows returns the RestrictedToWorkflows field if it's non-nil, zero value otherwise. +func (r *RunnerGroup) GetRestrictedToWorkflows() bool { + if r == nil || r.RestrictedToWorkflows == nil { + return false + } + return *r.RestrictedToWorkflows +} + // GetRunnersURL returns the RunnersURL field if it's non-nil, zero value otherwise. func (r *RunnerGroup) GetRunnersURL() string { if r == nil || r.RunnersURL == nil { @@ -16206,6 +18526,14 @@ func (r *RunnerGroup) GetVisibility() string { return *r.Visibility } +// GetWorkflowRestrictionsReadOnly returns the WorkflowRestrictionsReadOnly field if it's non-nil, zero value otherwise. +func (r *RunnerGroup) GetWorkflowRestrictionsReadOnly() bool { + if r == nil || r.WorkflowRestrictionsReadOnly == nil { + return false + } + return *r.WorkflowRestrictionsReadOnly +} + // GetID returns the ID field if it's non-nil, zero value otherwise. func (r *RunnerLabels) GetID() int64 { if r == nil || r.ID == nil { @@ -16414,6 +18742,62 @@ func (s *ScanningAnalysis) GetWarning() string { return *s.Warning } +// GetCreated returns the Created field if it's non-nil, zero value otherwise. +func (s *SCIMMeta) GetCreated() Timestamp { + if s == nil || s.Created == nil { + return Timestamp{} + } + return *s.Created +} + +// GetLastModified returns the LastModified field if it's non-nil, zero value otherwise. +func (s *SCIMMeta) GetLastModified() Timestamp { + if s == nil || s.LastModified == nil { + return Timestamp{} + } + return *s.LastModified +} + +// GetLocation returns the Location field if it's non-nil, zero value otherwise. 
+func (s *SCIMMeta) GetLocation() string { + if s == nil || s.Location == nil { + return "" + } + return *s.Location +} + +// GetResourceType returns the ResourceType field if it's non-nil, zero value otherwise. +func (s *SCIMMeta) GetResourceType() string { + if s == nil || s.ResourceType == nil { + return "" + } + return *s.ResourceType +} + +// GetItemsPerPage returns the ItemsPerPage field if it's non-nil, zero value otherwise. +func (s *SCIMProvisionedIdentities) GetItemsPerPage() int { + if s == nil || s.ItemsPerPage == nil { + return 0 + } + return *s.ItemsPerPage +} + +// GetStartIndex returns the StartIndex field if it's non-nil, zero value otherwise. +func (s *SCIMProvisionedIdentities) GetStartIndex() int { + if s == nil || s.StartIndex == nil { + return 0 + } + return *s.StartIndex +} + +// GetTotalResults returns the TotalResults field if it's non-nil, zero value otherwise. +func (s *SCIMProvisionedIdentities) GetTotalResults() int { + if s == nil || s.TotalResults == nil { + return 0 + } + return *s.TotalResults +} + // GetActive returns the Active field if it's non-nil, zero value otherwise. func (s *SCIMUserAttributes) GetActive() bool { if s == nil || s.Active == nil { @@ -16438,6 +18822,22 @@ func (s *SCIMUserAttributes) GetExternalID() string { return *s.ExternalID } +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (s *SCIMUserAttributes) GetID() string { + if s == nil || s.ID == nil { + return "" + } + return *s.ID +} + +// GetMeta returns the Meta field. +func (s *SCIMUserAttributes) GetMeta() *SCIMMeta { + if s == nil { + return nil + } + return s.Meta +} + // GetPrimary returns the Primary field if it's non-nil, zero value otherwise. func (s *SCIMUserEmail) GetPrimary() bool { if s == nil || s.Primary == nil { @@ -16726,6 +19126,14 @@ func (s *SecretScanningAlertUpdateOptions) GetState() string { return *s.State } +// GetStatus returns the Status field if it's non-nil, zero value otherwise. +func (s *SecretScanningPushProtection) GetStatus() string { + if s == nil || s.Status == nil { + return "" + } + return *s.Status +} + // GetDescription returns the Description field if it's non-nil, zero value otherwise. func (s *SecurityAdvisory) GetDescription() string { if s == nil || s.Description == nil { @@ -16814,6 +19222,14 @@ func (s *SecurityAndAnalysis) GetSecretScanning() *SecretScanning { return s.SecretScanning } +// GetSecretScanningPushProtection returns the SecretScanningPushProtection field. +func (s *SecurityAndAnalysis) GetSecretScanningPushProtection() *SecretScanningPushProtection { + if s == nil { + return nil + } + return s.SecretScanningPushProtection +} + // GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. func (s *SelectedReposList) GetTotalCount() int { if s == nil || s.TotalCount == nil { @@ -16830,6 +19246,14 @@ func (s *ServiceHook) GetName() string { return *s.Name } +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (s *SignatureRequirementEnforcementLevelChanges) GetFrom() string { + if s == nil || s.From == nil { + return "" + } + return *s.From +} + // GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. func (s *SignaturesProtectedBranch) GetEnabled() bool { if s == nil || s.Enabled == nil { @@ -16974,6 +19398,38 @@ func (s *SourceImportAuthor) GetURL() string { return *s.URL } +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
+func (s *SSHSigningKey) GetCreatedAt() Timestamp { + if s == nil || s.CreatedAt == nil { + return Timestamp{} + } + return *s.CreatedAt +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (s *SSHSigningKey) GetID() int64 { + if s == nil || s.ID == nil { + return 0 + } + return *s.ID +} + +// GetKey returns the Key field if it's non-nil, zero value otherwise. +func (s *SSHSigningKey) GetKey() string { + if s == nil || s.Key == nil { + return "" + } + return *s.Key +} + +// GetTitle returns the Title field if it's non-nil, zero value otherwise. +func (s *SSHSigningKey) GetTitle() string { + if s == nil || s.Title == nil { + return "" + } + return *s.Title +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (s *StarEvent) GetAction() string { if s == nil || s.Action == nil { @@ -17278,6 +19734,22 @@ func (t *Tag) GetVerification() *SignatureVerification { return t.Verification } +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (t *TagProtection) GetID() int64 { + if t == nil || t.ID == nil { + return 0 + } + return *t.ID +} + +// GetPattern returns the Pattern field if it's non-nil, zero value otherwise. +func (t *TagProtection) GetPattern() string { + if t == nil || t.Pattern == nil { + return "" + } + return *t.Pattern +} + // GetCompletedAt returns the CompletedAt field if it's non-nil, zero value otherwise. func (t *TaskStep) GetCompletedAt() Timestamp { if t == nil || t.CompletedAt == nil { @@ -18023,9 +20495,9 @@ func (t *Timeline) GetCommitURL() string { } // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. -func (t *Timeline) GetCreatedAt() time.Time { +func (t *Timeline) GetCreatedAt() Timestamp { if t == nil || t.CreatedAt == nil { - return time.Time{} + return Timestamp{} } return *t.CreatedAt } @@ -18086,6 +20558,14 @@ func (t *Timeline) GetRename() *Rename { return t.Rename } +// GetRequestedTeam returns the RequestedTeam field. +func (t *Timeline) GetRequestedTeam() *Team { + if t == nil { + return nil + } + return t.RequestedTeam +} + // GetRequester returns the Requester field. func (t *Timeline) GetRequester() *User { if t == nil { @@ -18127,9 +20607,9 @@ func (t *Timeline) GetState() string { } // GetSubmittedAt returns the SubmittedAt field if it's non-nil, zero value otherwise. -func (t *Timeline) GetSubmittedAt() time.Time { +func (t *Timeline) GetSubmittedAt() Timestamp { if t == nil || t.SubmittedAt == nil { - return time.Time{} + return Timestamp{} } return *t.SubmittedAt } @@ -18382,6 +20862,14 @@ func (t *TrafficViews) GetUniques() int { return *t.Uniques } +// GetNewName returns the NewName field if it's non-nil, zero value otherwise. +func (t *TransferRequest) GetNewName() string { + if t == nil || t.NewName == nil { + return "" + } + return *t.NewName +} + // GetSHA returns the SHA field if it's non-nil, zero value otherwise. func (t *Tree) GetSHA() string { if t == nil || t.SHA == nil { @@ -18526,6 +21014,14 @@ func (u *UpdateRunnerGroupRequest) GetName() string { return *u.Name } +// GetRestrictedToWorkflows returns the RestrictedToWorkflows field if it's non-nil, zero value otherwise. +func (u *UpdateRunnerGroupRequest) GetRestrictedToWorkflows() bool { + if u == nil || u.RestrictedToWorkflows == nil { + return false + } + return *u.RestrictedToWorkflows +} + // GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. 
func (u *UpdateRunnerGroupRequest) GetVisibility() string { if u == nil || u.Visibility == nil { @@ -18727,7 +21223,7 @@ func (u *User) GetOrganizationsURL() string { } // GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise. -func (u *User) GetOwnedPrivateRepos() int { +func (u *User) GetOwnedPrivateRepos() int64 { if u == nil || u.OwnedPrivateRepos == nil { return 0 } @@ -18831,7 +21327,7 @@ func (u *User) GetSuspendedAt() Timestamp { } // GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise. -func (u *User) GetTotalPrivateRepos() int { +func (u *User) GetTotalPrivateRepos() int64 { if u == nil || u.TotalPrivateRepos == nil { return 0 } @@ -19534,30 +22030,6 @@ func (w *WorkflowDispatchEvent) GetWorkflow() string { return *w.Workflow } -// GetMacOS returns the MacOS field. -func (w *WorkflowEnvironment) GetMacOS() *WorkflowBill { - if w == nil { - return nil - } - return w.MacOS -} - -// GetUbuntu returns the Ubuntu field. -func (w *WorkflowEnvironment) GetUbuntu() *WorkflowBill { - if w == nil { - return nil - } - return w.Ubuntu -} - -// GetWindows returns the Windows field. -func (w *WorkflowEnvironment) GetWindows() *WorkflowBill { - if w == nil { - return nil - } - return w.Windows -} - // GetCheckRunURL returns the CheckRunURL field if it's non-nil, zero value otherwise. func (w *WorkflowJob) GetCheckRunURL() string { if w == nil || w.CheckRunURL == nil { @@ -19582,6 +22054,22 @@ func (w *WorkflowJob) GetConclusion() string { return *w.Conclusion } +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (w *WorkflowJob) GetCreatedAt() Timestamp { + if w == nil || w.CreatedAt == nil { + return Timestamp{} + } + return *w.CreatedAt +} + +// GetHeadBranch returns the HeadBranch field if it's non-nil, zero value otherwise. +func (w *WorkflowJob) GetHeadBranch() string { + if w == nil || w.HeadBranch == nil { + return "" + } + return *w.HeadBranch +} + // GetHeadSHA returns the HeadSHA field if it's non-nil, zero value otherwise. func (w *WorkflowJob) GetHeadSHA() string { if w == nil || w.HeadSHA == nil { @@ -19622,6 +22110,14 @@ func (w *WorkflowJob) GetNodeID() string { return *w.NodeID } +// GetRunAttempt returns the RunAttempt field if it's non-nil, zero value otherwise. +func (w *WorkflowJob) GetRunAttempt() int64 { + if w == nil || w.RunAttempt == nil { + return 0 + } + return *w.RunAttempt +} + // GetRunID returns the RunID field if it's non-nil, zero value otherwise. func (w *WorkflowJob) GetRunID() int64 { if w == nil || w.RunID == nil { @@ -19694,6 +22190,14 @@ func (w *WorkflowJob) GetURL() string { return *w.URL } +// GetWorkflowName returns the WorkflowName field if it's non-nil, zero value otherwise. +func (w *WorkflowJob) GetWorkflowName() string { + if w == nil || w.WorkflowName == nil { + return "" + } + return *w.WorkflowName +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (w *WorkflowJobEvent) GetAction() string { if w == nil || w.Action == nil { @@ -19806,6 +22310,14 @@ func (w *WorkflowRun) GetCreatedAt() Timestamp { return *w.CreatedAt } +// GetDisplayTitle returns the DisplayTitle field if it's non-nil, zero value otherwise. +func (w *WorkflowRun) GetDisplayTitle() string { + if w == nil || w.DisplayTitle == nil { + return "" + } + return *w.DisplayTitle +} + // GetEvent returns the Event field if it's non-nil, zero value otherwise. 
func (w *WorkflowRun) GetEvent() string { if w == nil || w.Event == nil { @@ -20006,30 +22518,6 @@ func (w *WorkflowRunBill) GetTotalMS() int64 { return *w.TotalMS } -// GetMacOS returns the MacOS field. -func (w *WorkflowRunEnvironment) GetMacOS() *WorkflowRunBill { - if w == nil { - return nil - } - return w.MacOS -} - -// GetUbuntu returns the Ubuntu field. -func (w *WorkflowRunEnvironment) GetUbuntu() *WorkflowRunBill { - if w == nil { - return nil - } - return w.Ubuntu -} - -// GetWindows returns the Windows field. -func (w *WorkflowRunEnvironment) GetWindows() *WorkflowRunBill { - if w == nil { - return nil - } - return w.Windows -} - // GetAction returns the Action field if it's non-nil, zero value otherwise. func (w *WorkflowRunEvent) GetAction() string { if w == nil || w.Action == nil { @@ -20111,7 +22599,7 @@ func (w *WorkflowRuns) GetTotalCount() int { } // GetBillable returns the Billable field. -func (w *WorkflowRunUsage) GetBillable() *WorkflowRunEnvironment { +func (w *WorkflowRunUsage) GetBillable() *WorkflowRunBillMap { if w == nil { return nil } @@ -20135,7 +22623,7 @@ func (w *Workflows) GetTotalCount() int { } // GetBillable returns the Billable field. -func (w *WorkflowUsage) GetBillable() *WorkflowEnvironment { +func (w *WorkflowUsage) GetBillable() *WorkflowBillMap { if w == nil { return nil } diff --git a/vendor/github.com/google/go-github/v45/github/github.go b/vendor/github.com/google/go-github/v53/github/github.go similarity index 83% rename from vendor/github.com/google/go-github/v45/github/github.go rename to vendor/github.com/google/go-github/v53/github/github.go index 08b7db8e55..7d8aef5302 100644 --- a/vendor/github.com/google/go-github/v45/github/github.go +++ b/vendor/github.com/google/go-github/v53/github/github.go @@ -15,7 +15,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "reflect" @@ -25,17 +24,23 @@ import ( "time" "github.com/google/go-querystring/query" + "golang.org/x/oauth2" ) const ( - defaultBaseURL = "https://api.github.com/" - uploadBaseURL = "https://uploads.github.com/" - userAgent = "go-github" + Version = "v53.0.0" + defaultAPIVersion = "2022-11-28" + defaultBaseURL = "https://api.github.com/" + defaultUserAgent = "go-github" + "/" + Version + uploadBaseURL = "https://uploads.github.com/" + + headerAPIVersion = "X-GitHub-Api-Version" headerRateLimit = "X-RateLimit-Limit" headerRateRemaining = "X-RateLimit-Remaining" headerRateReset = "X-RateLimit-Reset" headerOTP = "X-GitHub-OTP" + headerRetryAfter = "Retry-After" headerTokenExpiration = "GitHub-Authentication-Token-Expiration" @@ -167,8 +172,9 @@ type Client struct { // User agent used when communicating with the GitHub API. UserAgent string - rateMu sync.Mutex - rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls. + rateMu sync.Mutex + rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls. + secondaryRateLimitReset time.Time // Secondary rate limit reset for the client as determined by the most recent API calls. common service // Reuse a single struct instead of allocating one for each service on the heap. @@ -235,6 +241,14 @@ type ListCursorOptions struct { // For paginated result sets, the number of results to include per page. PerPage int `url:"per_page,omitempty"` + // For paginated result sets, the number of results per page (max 100), starting from the first matching result. + // This parameter must not be used in combination with last. 
+ First int `url:"first,omitempty"` + + // For paginated result sets, the number of results per page (max 100), starting from the last matching result. + // This parameter must not be used in combination with first. + Last int `url:"last,omitempty"` + // A cursor, as given in the Link header. If specified, the query only searches for events after this cursor. After string `url:"after,omitempty"` @@ -301,7 +315,7 @@ func NewClient(httpClient *http.Client) *Client { baseURL, _ := url.Parse(defaultBaseURL) uploadURL, _ := url.Parse(uploadBaseURL) - c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL} + c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: defaultUserAgent, UploadURL: uploadURL} c.common.client = c c.Actions = (*ActionsService)(&c.common) c.Activity = (*ActivityService)(&c.common) @@ -335,6 +349,16 @@ func NewClient(httpClient *http.Client) *Client { return c } +// NewClientWithEnvProxy enhances NewClient with the HttpProxy env. +func NewClientWithEnvProxy() *Client { + return NewClient(&http.Client{Transport: &http.Transport{Proxy: http.ProxyFromEnvironment}}) +} + +// NewTokenClient returns a new GitHub API client authenticated with the provided token. +func NewTokenClient(ctx context.Context, token string) *Client { + return NewClient(oauth2.NewClient(ctx, oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}))) +} + // NewEnterpriseClient returns a new GitHub API client with provided // base URL and upload URL (often is your GitHub Enterprise hostname). // If the base URL does not have the suffix "/api/v3/", it will be added automatically. @@ -383,12 +407,24 @@ func NewEnterpriseClient(baseURL, uploadURL string, httpClient *http.Client) (*C return c, nil } +// RequestOption represents an option that can modify an http.Request. +type RequestOption func(req *http.Request) + +// WithVersion overrides the GitHub v3 API version for this individual request. +// For more information, see: +// https://github.blog/2022-11-28-to-infinity-and-beyond-enabling-the-future-of-githubs-rest-api-with-api-versioning/ +func WithVersion(version string) RequestOption { + return func(req *http.Request) { + req.Header.Set(headerAPIVersion, version) + } +} + // NewRequest creates an API request. A relative URL can be provided in urlStr, // in which case it is resolved relative to the BaseURL of the Client. // Relative URLs should always be specified without a preceding slash. If // specified, the value pointed to by body is JSON encoded and included as the // request body. -func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) { +func (c *Client) NewRequest(method, urlStr string, body interface{}, opts ...RequestOption) (*http.Request, error) { if !strings.HasSuffix(c.BaseURL.Path, "/") { return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) } @@ -421,13 +457,52 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ if c.UserAgent != "" { req.Header.Set("User-Agent", c.UserAgent) } + req.Header.Set(headerAPIVersion, defaultAPIVersion) + + for _, opt := range opts { + opt(req) + } + + return req, nil +} + +// NewFormRequest creates an API request. A relative URL can be provided in urlStr, +// in which case it is resolved relative to the BaseURL of the Client. +// Relative URLs should always be specified without a preceding slash. +// Body is sent with Content-Type: application/x-www-form-urlencoded. 
+func (c *Client) NewFormRequest(urlStr string, body io.Reader, opts ...RequestOption) (*http.Request, error) { + if !strings.HasSuffix(c.BaseURL.Path, "/") { + return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) + } + + u, err := c.BaseURL.Parse(urlStr) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, u.String(), body) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", mediaTypeV3) + if c.UserAgent != "" { + req.Header.Set("User-Agent", c.UserAgent) + } + req.Header.Set(headerAPIVersion, defaultAPIVersion) + + for _, opt := range opts { + opt(req) + } + return req, nil } // NewUploadRequest creates an upload request. A relative URL can be provided in // urlStr, in which case it is resolved relative to the UploadURL of the Client. // Relative URLs should always be specified without a preceding slash. -func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string) (*http.Request, error) { +func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string, opts ...RequestOption) (*http.Request, error) { if !strings.HasSuffix(c.UploadURL.Path, "/") { return nil, fmt.Errorf("UploadURL must have a trailing slash, but %q does not", c.UploadURL) } @@ -449,6 +524,12 @@ func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, m req.Header.Set("Content-Type", mediaType) req.Header.Set("Accept", mediaTypeV3) req.Header.Set("User-Agent", c.UserAgent) + req.Header.Set(headerAPIVersion, defaultAPIVersion) + + for _, opt := range opts { + opt(req) + } + return req, nil } @@ -496,7 +577,8 @@ type Response struct { // propagate to Response. Rate Rate - // token's expiration date + // token's expiration date. Timestamp is 0001-01-01 when token doesn't expire. + // So it is valid for TokenExpiration.Equal(Timestamp{}) or TokenExpiration.Time.After(time.Now()) TokenExpiration Timestamp } @@ -596,15 +678,44 @@ func parseRate(r *http.Response) Rate { return rate } +// parseSecondaryRate parses the secondary rate related headers, +// and returns the time to retry after. +func parseSecondaryRate(r *http.Response) *time.Duration { + // According to GitHub support, the "Retry-After" header value will be + // an integer which represents the number of seconds that one should + // wait before resuming making requests. + if v := r.Header.Get(headerRetryAfter); v != "" { + retryAfterSeconds, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop. + retryAfter := time.Duration(retryAfterSeconds) * time.Second + return &retryAfter + } + + // According to GitHub support, endpoints might return x-ratelimit-reset instead, + // as an integer which represents the number of seconds since epoch UTC, + // represting the time to resume making requests. + if v := r.Header.Get(headerRateReset); v != "" { + secondsSinceEpoch, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop. + retryAfter := time.Until(time.Unix(secondsSinceEpoch, 0)) + return &retryAfter + } + + return nil +} + // parseTokenExpiration parses the TokenExpiration related headers. +// Returns 0001-01-01 if the header is not defined or could not be parsed. 
func parseTokenExpiration(r *http.Response) Timestamp { - var exp Timestamp if v := r.Header.Get(headerTokenExpiration); v != "" { - if t, err := time.Parse("2006-01-02 03:04:05 MST", v); err == nil { - exp = Timestamp{t.Local()} + if t, err := time.Parse("2006-01-02 15:04:05 MST", v); err == nil { + return Timestamp{t.Local()} + } + // Some tokens include the timezone offset instead of the timezone. + // https://github.com/google/go-github/issues/2649 + if t, err := time.Parse("2006-01-02 15:04:05 -0700", v); err == nil { + return Timestamp{t.Local()} } } - return exp + return Timestamp{} // 0001-01-01 00:00:00 } type requestContext uint8 @@ -628,7 +739,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro req = withContext(ctx, req) - rateLimitCategory := category(req.URL.Path) + rateLimitCategory := category(req.Method, req.URL.Path) if bypass := ctx.Value(bypassRateLimitCheck); bypass == nil { // If we've hit rate limit, don't make further requests before Reset time. @@ -638,6 +749,12 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro Rate: err.Rate, }, err } + // If we've hit a secondary rate limit, don't make further requests before Retry After. + if err := c.checkSecondaryRateLimitBeforeDo(ctx, req); err != nil { + return &Response{ + Response: err.Response, + }, err + } } resp, err := c.client.Do(req) @@ -681,7 +798,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro // Issue #1022 aerr, ok := err.(*AcceptedError) if ok { - b, readErr := ioutil.ReadAll(resp.Body) + b, readErr := io.ReadAll(resp.Body) if readErr != nil { return response, readErr } @@ -689,6 +806,14 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro aerr.Raw = b err = aerr } + + // Update the secondary rate limit if we hit it. + rerr, ok := err.(*AbuseRateLimitError) + if ok && rerr.RetryAfter != nil { + c.rateMu.Lock() + c.secondaryRateLimitReset = time.Now().Add(*rerr.RetryAfter) + c.rateMu.Unlock() + } } return response, err } @@ -741,7 +866,7 @@ func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rat StatusCode: http.StatusForbidden, Request: req, Header: make(http.Header), - Body: ioutil.NopCloser(strings.NewReader("")), + Body: io.NopCloser(strings.NewReader("")), } return &RateLimitError{ Rate: rate, @@ -753,6 +878,35 @@ func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rat return nil } +// checkSecondaryRateLimitBeforeDo does not make any network calls, but uses existing knowledge from +// current client state in order to quickly check if *AbuseRateLimitError can be immediately returned +// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily. +// Otherwise it returns nil, and Client.Do should proceed normally. +func (c *Client) checkSecondaryRateLimitBeforeDo(ctx context.Context, req *http.Request) *AbuseRateLimitError { + c.rateMu.Lock() + secondary := c.secondaryRateLimitReset + c.rateMu.Unlock() + if !secondary.IsZero() && time.Now().Before(secondary) { + // Create a fake response. 
+ resp := &http.Response{ + Status: http.StatusText(http.StatusForbidden), + StatusCode: http.StatusForbidden, + Request: req, + Header: make(http.Header), + Body: io.NopCloser(strings.NewReader("")), + } + + retryAfter := time.Until(secondary) + return &AbuseRateLimitError{ + Response: resp, + Message: fmt.Sprintf("API secondary rate limit exceeded until %v, not making remote request.", secondary), + RetryAfter: &retryAfter, + } + } + + return nil +} + // compareHTTPResponse returns whether two http.Response objects are equal or not. // Currently, only StatusCode is checked. This function is used when implementing the // Is(error) bool interface for the custom error types in this package. @@ -773,7 +927,7 @@ An ErrorResponse reports one or more errors caused by an API request. GitHub API docs: https://docs.github.com/en/rest/#client-errors */ type ErrorResponse struct { - Response *http.Response // HTTP response that caused this error + Response *http.Response `json:"-"` // HTTP response that caused this error Message string `json:"message"` // error message Errors []Error `json:"errors"` // more detail on individual errors // Block is only populated on certain types of errors such as code 451. @@ -947,17 +1101,17 @@ func sanitizeURL(uri *url.URL) *url.URL { An Error reports more details on an individual error in an ErrorResponse. These are the possible validation error codes: - missing: - resource does not exist - missing_field: - a required field on a resource has not been set - invalid: - the formatting of a field is invalid - already_exists: - another resource has the same valid as this field - custom: - some resources return this (e.g. github.User.CreateKey()), additional - information is set in the Message field of the Error + missing: + resource does not exist + missing_field: + a required field on a resource has not been set + invalid: + the formatting of a field is invalid + already_exists: + another resource has the same valid as this field + custom: + some resources return this (e.g. github.User.CreateKey()), additional + information is set in the Message field of the Error GitHub error responses structure are often undocumented and inconsistent. Sometimes error is just a simple string (Issue #540). @@ -1003,14 +1157,14 @@ func CheckResponse(r *http.Response) error { } errorResponse := &ErrorResponse{Response: r} - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err == nil && data != nil { json.Unmarshal(data, errorResponse) } // Re-populate error response body because GitHub error responses are often // undocumented and inconsistent. // Issue #1136, #540. - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + r.Body = io.NopCloser(bytes.NewBuffer(data)) switch { case r.StatusCode == http.StatusUnauthorized && strings.HasPrefix(r.Header.Get(headerOTP), "required"): return (*TwoFactorAuthError)(errorResponse) @@ -1027,13 +1181,8 @@ func CheckResponse(r *http.Response) error { Response: errorResponse.Response, Message: errorResponse.Message, } - if v := r.Header["Retry-After"]; len(v) > 0 { - // According to GitHub support, the "Retry-After" header value will be - // an integer which represents the number of seconds that one should - // wait before resuming making requests. - retryAfterSeconds, _ := strconv.ParseInt(v[0], 10, 64) // Error handling is noop. 
- retryAfter := time.Duration(retryAfterSeconds) * time.Second - abuseRateLimitError.RetryAfter = &retryAfter + if retryAfter := parseSecondaryRate(r); retryAfter != nil { + abuseRateLimitError.RetryAfter = retryAfter } return abuseRateLimitError default: @@ -1123,13 +1272,36 @@ const ( categories // An array of this length will be able to contain all rate limit categories. ) -// category returns the rate limit category of the endpoint, determined by Request.URL.Path. -func category(path string) rateLimitCategory { +// category returns the rate limit category of the endpoint, determined by HTTP method and Request.URL.Path. +func category(method, path string) rateLimitCategory { switch { + // https://docs.github.com/en/rest/rate-limit#about-rate-limits default: + // NOTE: coreCategory is returned for actionsRunnerRegistrationCategory too, + // because no API found for this category. return coreCategory case strings.HasPrefix(path, "/search/"): return searchCategory + case path == "/graphql": + return graphqlCategory + case strings.HasPrefix(path, "/app-manifests/") && + strings.HasSuffix(path, "/conversions") && + method == http.MethodPost: + return integrationManifestCategory + + // https://docs.github.com/en/rest/migrations/source-imports#start-an-import + case strings.HasPrefix(path, "/repos/") && + strings.HasSuffix(path, "/import") && + method == http.MethodPut: + return sourceImportCategory + + // https://docs.github.com/en/rest/code-scanning#upload-an-analysis-as-sarif-data + case strings.HasSuffix(path, "/code-scanning/sarifs"): + return codeScanningUploadCategory + + // https://docs.github.com/en/enterprise-cloud@latest/rest/scim + case strings.HasPrefix(path, "/scim/"): + return scimCategory } } @@ -1322,8 +1494,8 @@ func formatRateReset(d time.Duration) string { // When using roundTripWithOptionalFollowRedirect, note that it // is the responsibility of the caller to close the response body. -func (c *Client) roundTripWithOptionalFollowRedirect(ctx context.Context, u string, followRedirects bool) (*http.Response, error) { - req, err := c.NewRequest("GET", u, nil) +func (c *Client) roundTripWithOptionalFollowRedirect(ctx context.Context, u string, followRedirects bool, opts ...RequestOption) (*http.Response, error) { + req, err := c.NewRequest("GET", u, nil, opts...) if err != nil { return nil, err } @@ -1344,7 +1516,7 @@ func (c *Client) roundTripWithOptionalFollowRedirect(ctx context.Context, u stri if followRedirects && resp.StatusCode == http.StatusMovedPermanently { resp.Body.Close() u = resp.Header.Get("Location") - resp, err = c.roundTripWithOptionalFollowRedirect(ctx, u, false) + resp, err = c.roundTripWithOptionalFollowRedirect(ctx, u, false, opts...) 
} return resp, err } diff --git a/vendor/github.com/google/go-github/v45/github/gitignore.go b/vendor/github.com/google/go-github/v53/github/gitignore.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/gitignore.go rename to vendor/github.com/google/go-github/v53/github/gitignore.go diff --git a/vendor/github.com/google/go-github/v45/github/interactions.go b/vendor/github.com/google/go-github/v53/github/interactions.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/interactions.go rename to vendor/github.com/google/go-github/v53/github/interactions.go diff --git a/vendor/github.com/google/go-github/v45/github/interactions_orgs.go b/vendor/github.com/google/go-github/v53/github/interactions_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/interactions_orgs.go rename to vendor/github.com/google/go-github/v53/github/interactions_orgs.go diff --git a/vendor/github.com/google/go-github/v45/github/interactions_repos.go b/vendor/github.com/google/go-github/v53/github/interactions_repos.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/interactions_repos.go rename to vendor/github.com/google/go-github/v53/github/interactions_repos.go diff --git a/vendor/github.com/google/go-github/v45/github/issue_import.go b/vendor/github.com/google/go-github/v53/github/issue_import.go similarity index 91% rename from vendor/github.com/google/go-github/v45/github/issue_import.go rename to vendor/github.com/google/go-github/v53/github/issue_import.go index a9810407cc..4bc8d5f1d2 100644 --- a/vendor/github.com/google/go-github/v45/github/issue_import.go +++ b/vendor/github.com/google/go-github/v53/github/issue_import.go @@ -10,7 +10,6 @@ import ( "context" "encoding/json" "fmt" - "time" ) // IssueImportService handles communication with the issue import related @@ -29,9 +28,9 @@ type IssueImportRequest struct { type IssueImport struct { Title string `json:"title"` Body string `json:"body"` - CreatedAt *time.Time `json:"created_at,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + ClosedAt *Timestamp `json:"closed_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` Assignee *string `json:"assignee,omitempty"` Milestone *int `json:"milestone,omitempty"` Closed *bool `json:"closed,omitempty"` @@ -40,7 +39,7 @@ type IssueImport struct { // Comment represents comments of issue to import. type Comment struct { - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` Body string `json:"body"` } @@ -53,8 +52,8 @@ type IssueImportResponse struct { URL *string `json:"url,omitempty"` ImportIssuesURL *string `json:"import_issues_url,omitempty"` RepositoryURL *string `json:"repository_url,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` Message *string `json:"message,omitempty"` DocumentationURL *string `json:"documentation_url,omitempty"` Errors []*IssueImportError `json:"errors,omitempty"` @@ -126,7 +125,7 @@ func (s *IssueImportService) CheckStatus(ctx context.Context, owner, repo string // CheckStatusSince checks the status of multiple imported issues since a given date. 
// // https://gist.github.com/jonmagic/5282384165e0f86ef105#check-status-of-multiple-issues -func (s *IssueImportService) CheckStatusSince(ctx context.Context, owner, repo string, since time.Time) ([]*IssueImportResponse, *Response, error) { +func (s *IssueImportService) CheckStatusSince(ctx context.Context, owner, repo string, since Timestamp) ([]*IssueImportResponse, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import/issues?since=%v", owner, repo, since.Format("2006-01-02")) req, err := s.client.NewRequest("GET", u, nil) if err != nil { diff --git a/vendor/github.com/google/go-github/v45/github/issues.go b/vendor/github.com/google/go-github/v53/github/issues.go similarity index 93% rename from vendor/github.com/google/go-github/v45/github/issues.go rename to vendor/github.com/google/go-github/v53/github/issues.go index 12488f9815..42e58a17a8 100644 --- a/vendor/github.com/google/go-github/v45/github/issues.go +++ b/vendor/github.com/google/go-github/v53/github/issues.go @@ -25,9 +25,11 @@ type IssuesService service // this is an issue, and if PullRequestLinks is not nil, this is a pull request. // The IsPullRequest helper method can be used to check that. type Issue struct { - ID *int64 `json:"id,omitempty"` - Number *int `json:"number,omitempty"` - State *string `json:"state,omitempty"` + ID *int64 `json:"id,omitempty"` + Number *int `json:"number,omitempty"` + State *string `json:"state,omitempty"` + // StateReason can be one of: "completed", "not_planned", "reopened". + StateReason *string `json:"state_reason,omitempty"` Locked *bool `json:"locked,omitempty"` Title *string `json:"title,omitempty"` Body *string `json:"body,omitempty"` @@ -36,9 +38,9 @@ type Issue struct { Labels []*Label `json:"labels,omitempty"` Assignee *User `json:"assignee,omitempty"` Comments *int `json:"comments,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + ClosedAt *Timestamp `json:"closed_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` ClosedBy *User `json:"closed_by,omitempty"` URL *string `json:"url,omitempty"` HTMLURL *string `json:"html_url,omitempty"` @@ -77,13 +79,15 @@ func (i Issue) IsPullRequest() bool { // It is separate from Issue above because otherwise Labels // and Assignee fail to serialize to the correct JSON. type IssueRequest struct { - Title *string `json:"title,omitempty"` - Body *string `json:"body,omitempty"` - Labels *[]string `json:"labels,omitempty"` - Assignee *string `json:"assignee,omitempty"` - State *string `json:"state,omitempty"` - Milestone *int `json:"milestone,omitempty"` - Assignees *[]string `json:"assignees,omitempty"` + Title *string `json:"title,omitempty"` + Body *string `json:"body,omitempty"` + Labels *[]string `json:"labels,omitempty"` + Assignee *string `json:"assignee,omitempty"` + State *string `json:"state,omitempty"` + // StateReason can be 'completed' or 'not_planned'. 
+ StateReason *string `json:"state_reason,omitempty"` + Milestone *int `json:"milestone,omitempty"` + Assignees *[]string `json:"assignees,omitempty"` } // IssueListOptions specifies the optional parameters to the IssuesService.List diff --git a/vendor/github.com/google/go-github/v45/github/issues_assignees.go b/vendor/github.com/google/go-github/v53/github/issues_assignees.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/issues_assignees.go rename to vendor/github.com/google/go-github/v53/github/issues_assignees.go diff --git a/vendor/github.com/google/go-github/v45/github/issues_comments.go b/vendor/github.com/google/go-github/v53/github/issues_comments.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/issues_comments.go rename to vendor/github.com/google/go-github/v53/github/issues_comments.go index 361ee49a69..17881c093d 100644 --- a/vendor/github.com/google/go-github/v45/github/issues_comments.go +++ b/vendor/github.com/google/go-github/v53/github/issues_comments.go @@ -18,8 +18,8 @@ type IssueComment struct { Body *string `json:"body,omitempty"` User *User `json:"user,omitempty"` Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` // AuthorAssociation is the comment author's relationship to the issue's repository. // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". AuthorAssociation *string `json:"author_association,omitempty"` diff --git a/vendor/github.com/google/go-github/v45/github/issues_events.go b/vendor/github.com/google/go-github/v53/github/issues_events.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/issues_events.go rename to vendor/github.com/google/go-github/v53/github/issues_events.go index d8ffc0b542..ed07659170 100644 --- a/vendor/github.com/google/go-github/v45/github/issues_events.go +++ b/vendor/github.com/google/go-github/v53/github/issues_events.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // IssueEvent represents an event that occurred around an Issue or Pull Request. @@ -71,7 +70,7 @@ type IssueEvent struct { // Event *string `json:"event,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` Issue *Issue `json:"issue,omitempty"` // Only present on certain events; see above. diff --git a/vendor/github.com/google/go-github/v45/github/issues_labels.go b/vendor/github.com/google/go-github/v53/github/issues_labels.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/issues_labels.go rename to vendor/github.com/google/go-github/v53/github/issues_labels.go diff --git a/vendor/github.com/google/go-github/v45/github/issues_milestones.go b/vendor/github.com/google/go-github/v53/github/issues_milestones.go similarity index 95% rename from vendor/github.com/google/go-github/v45/github/issues_milestones.go rename to vendor/github.com/google/go-github/v53/github/issues_milestones.go index 3c9be2407e..897c7c0b6d 100644 --- a/vendor/github.com/google/go-github/v45/github/issues_milestones.go +++ b/vendor/github.com/google/go-github/v53/github/issues_milestones.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // Milestone represents a GitHub repository milestone. 
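A recurring change in the v45 to v53 hunks above is that issue-related timestamp fields (IssueImport, Issue, IssueComment, IssueEvent, and Milestone below) move from *time.Time to *Timestamp. The following is a minimal sketch of how calling code might adapt; it assumes a hypothetical helper built on Issues.Get and relies on Timestamp embedding time.Time, so the wall-clock value is reachable via .Time:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/google/go-github/v53/github"
)

// printIssueAge is a hypothetical helper: under v53, Issue.CreatedAt is a
// *github.Timestamp rather than a *time.Time, so the caller unwraps the
// embedded time.Time before doing time arithmetic.
func printIssueAge(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	issue, _, err := client.Issues.Get(ctx, owner, repo, number)
	if err != nil {
		return err
	}
	created := issue.GetCreatedAt().Time // Timestamp embeds time.Time
	fmt.Printf("issue #%d opened %s ago\n", number, time.Since(created).Round(time.Hour))
	return nil
}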
@@ -24,10 +23,10 @@ type Milestone struct { Creator *User `json:"creator,omitempty"` OpenIssues *int `json:"open_issues,omitempty"` ClosedIssues *int `json:"closed_issues,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - DueOn *time.Time `json:"due_on,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + ClosedAt *Timestamp `json:"closed_at,omitempty"` + DueOn *Timestamp `json:"due_on,omitempty"` NodeID *string `json:"node_id,omitempty"` } diff --git a/vendor/github.com/google/go-github/v45/github/issues_timeline.go b/vendor/github.com/google/go-github/v53/github/issues_timeline.go similarity index 95% rename from vendor/github.com/google/go-github/v45/github/issues_timeline.go rename to vendor/github.com/google/go-github/v53/github/issues_timeline.go index 9ec498e45c..9c73e6176d 100644 --- a/vendor/github.com/google/go-github/v45/github/issues_timeline.go +++ b/vendor/github.com/google/go-github/v53/github/issues_timeline.go @@ -9,7 +9,6 @@ import ( "context" "fmt" "strings" - "time" ) // Timeline represents an event that occurred around an Issue or Pull Request. @@ -35,6 +34,8 @@ type Timeline struct { SHA *string `json:"sha,omitempty"` // The commit message. Message *string `json:"message,omitempty"` + // A list of parent commits. + Parents []*Commit `json:"parents,omitempty"` // Event identifies the actual type of Event that occurred. Possible values // are: @@ -116,7 +117,7 @@ type Timeline struct { // The string SHA of a commit that referenced this Issue or Pull Request. CommitID *string `json:"commit_id,omitempty"` // The timestamp indicating when the event occurred. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` // The Label object including `name` and `color` attributes. Only provided for // 'labeled' and 'unlabeled' events. Label *Label `json:"label,omitempty"` @@ -142,12 +143,14 @@ type Timeline struct { // The person requested to review the pull request. Reviewer *User `json:"requested_reviewer,omitempty"` + // RequestedTeam contains the team requested to review the pull request. + RequestedTeam *Team `json:"requested_team,omitempty"` // The person who requested a review. Requester *User `json:"review_requester,omitempty"` // The review summary text. Body *string `json:"body,omitempty"` - SubmittedAt *time.Time `json:"submitted_at,omitempty"` + SubmittedAt *Timestamp `json:"submitted_at,omitempty"` } // Source represents a reference's source. 
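Related to the Issue and IssueRequest hunks above, v53 also introduces the StateReason field. A short sketch, assuming the existing Issues.Edit call is the write path and that "not_planned" is the desired reason (owner, repo and number are placeholders):

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// closeAsNotPlanned is a hypothetical wrapper: it closes an issue and sets the
// v53 state_reason, which the API accepts as "completed" or "not_planned".
func closeAsNotPlanned(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	req := &github.IssueRequest{
		State:       github.String("closed"),
		StateReason: github.String("not_planned"),
	}
	_, _, err := client.Issues.Edit(ctx, owner, repo, number, req)
	return err
}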
diff --git a/vendor/github.com/google/go-github/v45/github/licenses.go b/vendor/github.com/google/go-github/v53/github/licenses.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/licenses.go rename to vendor/github.com/google/go-github/v53/github/licenses.go diff --git a/vendor/github.com/google/go-github/v45/github/messages.go b/vendor/github.com/google/go-github/v53/github/messages.go similarity index 85% rename from vendor/github.com/google/go-github/v45/github/messages.go rename to vendor/github.com/google/go-github/v53/github/messages.go index 44477ddb0d..8547b8810f 100644 --- a/vendor/github.com/google/go-github/v45/github/messages.go +++ b/vendor/github.com/google/go-github/v53/github/messages.go @@ -19,7 +19,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "mime" "net/http" "net/url" @@ -48,6 +47,7 @@ var ( "branch_protection_rule": "BranchProtectionRuleEvent", "check_run": "CheckRunEvent", "check_suite": "CheckSuiteEvent", + "code_scanning_alert": "CodeScanningAlertEvent", "commit_comment": "CommitCommentEvent", "content_reference": "ContentReferenceEvent", "create": "CreateEvent", @@ -55,7 +55,9 @@ var ( "deploy_key": "DeployKeyEvent", "deployment": "DeploymentEvent", "deployment_status": "DeploymentStatusEvent", + "deployment_protection_rule": "DeploymentProtectionRuleEvent", "discussion": "DiscussionEvent", + "discussion_comment": "DiscussionCommentEvent", "fork": "ForkEvent", "github_app_authorization": "GitHubAppAuthorizationEvent", "gollum": "GollumEvent", @@ -67,6 +69,7 @@ var ( "marketplace_purchase": "MarketplacePurchaseEvent", "member": "MemberEvent", "membership": "MembershipEvent", + "merge_group": "MergeGroupEvent", "meta": "MetaEvent", "milestone": "MilestoneEvent", "organization": "OrganizationEvent", @@ -146,30 +149,30 @@ func messageMAC(signature string) ([]byte, func() hash.Hash, error) { return buf, hashFunc, nil } -// ValidatePayload validates an incoming GitHub Webhook event request body +// ValidatePayloadFromBody validates an incoming GitHub Webhook event request body // and returns the (JSON) payload. // The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded". // If the Content-Type is neither then an error is returned. // secretToken is the GitHub Webhook secret token. -// If your webhook does not contain a secret token, you can pass nil or an empty slice. -// This is intended for local development purposes only and all webhooks should ideally set up a secret token. +// If your webhook does not contain a secret token, you can pass an empty secretToken. +// Webhooks without a secret token are not secure and should be avoided. // // Example usage: // -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// // read signature from request -// signature := "" -// payload, err := github.ValidatePayloadFromBody(r.Header.Get("Content-Type"), r.Body, signature, s.webhookSecretKey) -// if err != nil { ... } -// // Process payload... -// } +// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// // read signature from request +// signature := "" +// payload, err := github.ValidatePayloadFromBody(r.Header.Get("Content-Type"), r.Body, signature, s.webhookSecretKey) +// if err != nil { ... } +// // Process payload... +// } func ValidatePayloadFromBody(contentType string, readable io.Reader, signature string, secretToken []byte) (payload []byte, err error) { var body []byte // Raw body that GitHub uses to calculate the signature. 
switch contentType { case "application/json": var err error - if body, err = ioutil.ReadAll(readable); err != nil { + if body, err = io.ReadAll(readable); err != nil { return nil, err } @@ -183,7 +186,7 @@ func ValidatePayloadFromBody(contentType string, readable io.Reader, signature s const payloadFormParam = "payload" var err error - if body, err = ioutil.ReadAll(readable); err != nil { + if body, err = io.ReadAll(readable); err != nil { return nil, err } @@ -199,9 +202,8 @@ func ValidatePayloadFromBody(contentType string, readable io.Reader, signature s return nil, fmt.Errorf("webhook request has unsupported Content-Type %q", contentType) } - // Only validate the signature if a secret token exists. This is intended for - // local development only and all webhooks should ideally set up a secret token. - if len(secretToken) > 0 { + // Validate the signature if present or if one is expected (secretToken is non-empty). + if len(secretToken) > 0 || len(signature) > 0 { if err := ValidateSignature(signature, body, secretToken); err != nil { return nil, err } @@ -220,12 +222,11 @@ func ValidatePayloadFromBody(contentType string, readable io.Reader, signature s // // Example usage: // -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := github.ValidatePayload(r, s.webhookSecretKey) -// if err != nil { ... } -// // Process payload... -// } -// +// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// payload, err := github.ValidatePayload(r, s.webhookSecretKey) +// if err != nil { ... } +// // Process payload... +// } func ValidatePayload(r *http.Request, secretToken []byte) (payload []byte, err error) { signature := r.Header.Get(SHA256SignatureHeader) if signature == "" { @@ -278,20 +279,19 @@ func DeliveryID(r *http.Request) string { // // Example usage: // -// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// payload, err := github.ValidatePayload(r, s.webhookSecretKey) -// if err != nil { ... } -// event, err := github.ParseWebHook(github.WebHookType(r), payload) -// if err != nil { ... } -// switch event := event.(type) { -// case *github.CommitCommentEvent: -// processCommitCommentEvent(event) -// case *github.CreateEvent: -// processCreateEvent(event) -// ... -// } -// } -// +// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// payload, err := github.ValidatePayload(r, s.webhookSecretKey) +// if err != nil { ... } +// event, err := github.ParseWebHook(github.WebHookType(r), payload) +// if err != nil { ... } +// switch event := event.(type) { +// case *github.CommitCommentEvent: +// processCommitCommentEvent(event) +// case *github.CreateEvent: +// processCreateEvent(event) +// ... 
+// } +// } func ParseWebHook(messageType string, payload []byte) (interface{}, error) { eventType, ok := eventTypeMapping[messageType] if !ok { diff --git a/vendor/github.com/google/go-github/v45/github/migrations.go b/vendor/github.com/google/go-github/v53/github/migrations.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/migrations.go rename to vendor/github.com/google/go-github/v53/github/migrations.go diff --git a/vendor/github.com/google/go-github/v45/github/migrations_source_import.go b/vendor/github.com/google/go-github/v53/github/migrations_source_import.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/migrations_source_import.go rename to vendor/github.com/google/go-github/v53/github/migrations_source_import.go diff --git a/vendor/github.com/google/go-github/v45/github/migrations_user.go b/vendor/github.com/google/go-github/v53/github/migrations_user.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/migrations_user.go rename to vendor/github.com/google/go-github/v53/github/migrations_user.go index b8a0d608d6..6586fdb2d3 100644 --- a/vendor/github.com/google/go-github/v45/github/migrations_user.go +++ b/vendor/github.com/google/go-github/v53/github/migrations_user.go @@ -97,8 +97,12 @@ func (s *MigrationService) StartUserMigration(ctx context.Context, repos []strin // ListUserMigrations lists the most recent migrations. // // GitHub API docs: https://docs.github.com/en/rest/migrations/users#list-user-migrations -func (s *MigrationService) ListUserMigrations(ctx context.Context) ([]*UserMigration, *Response, error) { +func (s *MigrationService) ListUserMigrations(ctx context.Context, opts *ListOptions) ([]*UserMigration, *Response, error) { u := "user/migrations" + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } req, err := s.client.NewRequest("GET", u, nil) if err != nil { diff --git a/vendor/github.com/google/go-github/v45/github/misc.go b/vendor/github.com/google/go-github/v53/github/misc.go similarity index 96% rename from vendor/github.com/google/go-github/v45/github/misc.go rename to vendor/github.com/google/go-github/v53/github/misc.go index 412d1e2b95..8961524157 100644 --- a/vendor/github.com/google/go-github/v45/github/misc.go +++ b/vendor/github.com/google/go-github/v53/github/misc.go @@ -176,6 +176,14 @@ type APIMeta struct { // An array of SSH keys. SSHKeys []string `json:"ssh_keys,omitempty"` + + // An array of IP addresses in CIDR format specifying the addresses + // which serve GitHub websites. + Web []string `json:"web,omitempty"` + + // An array of IP addresses in CIDR format specifying the addresses + // which serve GitHub APIs. + API []string `json:"api,omitempty"` } // APIMeta returns information about GitHub.com, the service. 
Or, if you access diff --git a/vendor/github.com/google/go-github/v45/github/orgs.go b/vendor/github.com/google/go-github/v53/github/orgs.go similarity index 83% rename from vendor/github.com/google/go-github/v45/github/orgs.go rename to vendor/github.com/google/go-github/v53/github/orgs.go index 26b55c62d0..0c7e361b3f 100644 --- a/vendor/github.com/google/go-github/v45/github/orgs.go +++ b/vendor/github.com/google/go-github/v53/github/orgs.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // OrganizationsService provides access to the organization related functions @@ -35,10 +34,10 @@ type Organization struct { PublicGists *int `json:"public_gists,omitempty"` Followers *int `json:"followers,omitempty"` Following *int `json:"following,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - TotalPrivateRepos *int `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + TotalPrivateRepos *int64 `json:"total_private_repos,omitempty"` + OwnedPrivateRepos *int64 `json:"owned_private_repos,omitempty"` PrivateGists *int `json:"private_gists,omitempty"` DiskUsage *int `json:"disk_usage,omitempty"` Collaborators *int `json:"collaborators,omitempty"` @@ -82,6 +81,20 @@ type Organization struct { MembersCanCreatePublicPages *bool `json:"members_can_create_public_pages,omitempty"` // MembersCanCreatePrivatePages toggles whether organization members can create private GitHub Pages sites. MembersCanCreatePrivatePages *bool `json:"members_can_create_private_pages,omitempty"` + // WebCommitSignoffRequire toggles + WebCommitSignoffRequired *bool `json:"web_commit_signoff_required,omitempty"` + // AdvancedSecurityAuditLogEnabled toggles whether the advanced security audit log is enabled. + AdvancedSecurityEnabledForNewRepos *bool `json:"advanced_security_enabled_for_new_repositories,omitempty"` + // DependabotAlertsEnabled toggles whether dependabot alerts are enabled. + DependabotAlertsEnabledForNewRepos *bool `json:"dependabot_alerts_enabled_for_new_repositories,omitempty"` + // DependabotSecurityUpdatesEnabled toggles whether dependabot security updates are enabled. + DependabotSecurityUpdatesEnabledForNewRepos *bool `json:"dependabot_security_updates_enabled_for_new_repositories,omitempty"` + // DependabotGraphEnabledForNewRepos toggles whether dependabot graph is enabled on new repositories. + DependencyGraphEnabledForNewRepos *bool `json:"dependency_graph_enabled_for_new_repositories,omitempty"` + // SecretScanningEnabled toggles whether secret scanning is enabled on new repositories. + SecretScanningEnabledForNewRepos *bool `json:"secret_scanning_enabled_for_new_repositories,omitempty"` + // SecretScanningPushProtectionEnabledForNewRepos toggles whether secret scanning push protection is enabled on new repositories. 
+ SecretScanningPushProtectionEnabledForNewRepos *bool `json:"secret_scanning_push_protection_enabled_for_new_repositories,omitempty"` // API URLs URL *string `json:"url,omitempty"` @@ -108,7 +121,7 @@ type Plan struct { Name *string `json:"name,omitempty"` Space *int `json:"space,omitempty"` Collaborators *int `json:"collaborators,omitempty"` - PrivateRepos *int `json:"private_repos,omitempty"` + PrivateRepos *int64 `json:"private_repos,omitempty"` FilledSeats *int `json:"filled_seats,omitempty"` Seats *int `json:"seats,omitempty"` } @@ -249,6 +262,19 @@ func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organ return o, resp, nil } +// Delete an organization by name. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#delete-an-organization +func (s *OrganizationsService) Delete(ctx context.Context, org string) (*Response, error) { + u := fmt.Sprintf("orgs/%v", org) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + // ListInstallations lists installations for an organization. // // GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-app-installations-for-an-organization diff --git a/vendor/github.com/google/go-github/v45/github/orgs_actions_allowed.go b/vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_actions_allowed.go rename to vendor/github.com/google/go-github/v53/github/orgs_actions_allowed.go diff --git a/vendor/github.com/google/go-github/v45/github/orgs_actions_permissions.go b/vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_actions_permissions.go rename to vendor/github.com/google/go-github/v53/github/orgs_actions_permissions.go diff --git a/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go new file mode 100644 index 0000000000..e2e4692e57 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/orgs_audit_log.go @@ -0,0 +1,148 @@ +// Copyright 2021 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// GetAuditLogOptions sets up optional parameters to query audit-log endpoint. +type GetAuditLogOptions struct { + Phrase *string `url:"phrase,omitempty"` // A search phrase. (Optional.) + Include *string `url:"include,omitempty"` // Event type includes. Can be one of "web", "git", "all". Default: "web". (Optional.) + Order *string `url:"order,omitempty"` // The order of audit log events. Can be one of "asc" or "desc". Default: "desc". (Optional.) + + ListCursorOptions +} + +// HookConfig describes metadata about a webhook configuration. +type HookConfig struct { + ContentType *string `json:"content_type,omitempty"` + InsecureSSL *string `json:"insecure_ssl,omitempty"` + URL *string `json:"url,omitempty"` + + // Secret is returned obfuscated by GitHub, but it can be set for outgoing requests. + Secret *string `json:"secret,omitempty"` +} + +// ActorLocation contains information about reported location for an actor. +type ActorLocation struct { + CountryCode *string `json:"country_code,omitempty"` +} + +// PolicyOverrideReason contains user-supplied information about why a policy was overridden. 
+type PolicyOverrideReason struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// AuditEntry describes the fields that may be represented by various audit-log "action" entries. +// For a list of actions see - https://docs.github.com/en/github/setting-up-and-managing-organizations-and-teams/reviewing-the-audit-log-for-your-organization#audit-log-actions +type AuditEntry struct { + ActorIP *string `json:"actor_ip,omitempty"` + Action *string `json:"action,omitempty"` // The name of the action that was performed, for example `user.login` or `repo.create`. + Active *bool `json:"active,omitempty"` + ActiveWas *bool `json:"active_was,omitempty"` + Actor *string `json:"actor,omitempty"` // The actor who performed the action. + ActorLocation *ActorLocation `json:"actor_location,omitempty"` + BlockedUser *string `json:"blocked_user,omitempty"` + Business *string `json:"business,omitempty"` + CancelledAt *Timestamp `json:"cancelled_at,omitempty"` + CompletedAt *Timestamp `json:"completed_at,omitempty"` + Conclusion *string `json:"conclusion,omitempty"` + Config *HookConfig `json:"config,omitempty"` + ConfigWas *HookConfig `json:"config_was,omitempty"` + ContentType *string `json:"content_type,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + DeployKeyFingerprint *string `json:"deploy_key_fingerprint,omitempty"` + DocumentID *string `json:"_document_id,omitempty"` + Emoji *string `json:"emoji,omitempty"` + EnvironmentName *string `json:"environment_name,omitempty"` + Event *string `json:"event,omitempty"` + Events []string `json:"events,omitempty"` + EventsWere []string `json:"events_were,omitempty"` + Explanation *string `json:"explanation,omitempty"` + Fingerprint *string `json:"fingerprint,omitempty"` + HashedToken *string `json:"hashed_token,omitempty"` + HeadBranch *string `json:"head_branch,omitempty"` + HeadSHA *string `json:"head_sha,omitempty"` + HookID *int64 `json:"hook_id,omitempty"` + IsHostedRunner *bool `json:"is_hosted_runner,omitempty"` + JobName *string `json:"job_name,omitempty"` + JobWorkflowRef *string `json:"job_workflow_ref,omitempty"` + LimitedAvailability *bool `json:"limited_availability,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + OAuthApplicationID *int64 `json:"oauth_application_id,omitempty"` + OldUser *string `json:"old_user,omitempty"` + OldPermission *string `json:"old_permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`. + OpenSSHPublicKey *string `json:"openssh_public_key,omitempty"` + OperationType *string `json:"operation_type,omitempty"` + Org *string `json:"org,omitempty"` + OrgID *int64 `json:"org_id,omitempty"` + OverriddenCodes []string `json:"overridden_codes,omitempty"` + Permission *string `json:"permission,omitempty"` // The permission level for membership changes, for example `admin` or `read`. 
+ PreviousVisibility *string `json:"previous_visibility,omitempty"` + ProgrammaticAccessType *string `json:"programmatic_access_type,omitempty"` + PullRequestID *int64 `json:"pull_request_id,omitempty"` + PullRequestTitle *string `json:"pull_request_title,omitempty"` + PullRequestURL *string `json:"pull_request_url,omitempty"` + ReadOnly *string `json:"read_only,omitempty"` + Reasons []*PolicyOverrideReason `json:"reasons,omitempty"` + Repo *string `json:"repo,omitempty"` + Repository *string `json:"repository,omitempty"` + RepositoryPublic *bool `json:"repository_public,omitempty"` + RunAttempt *int64 `json:"run_attempt,omitempty"` + RunnerGroupID *int64 `json:"runner_group_id,omitempty"` + RunnerGroupName *string `json:"runner_group_name,omitempty"` + RunnerID *int64 `json:"runner_id,omitempty"` + RunnerLabels []string `json:"runner_labels,omitempty"` + RunnerName *string `json:"runner_name,omitempty"` + RunNumber *int64 `json:"run_number,omitempty"` + SecretsPassed []string `json:"secrets_passed,omitempty"` + SourceVersion *string `json:"source_version,omitempty"` + StartedAt *Timestamp `json:"started_at,omitempty"` + TargetLogin *string `json:"target_login,omitempty"` + TargetVersion *string `json:"target_version,omitempty"` + Team *string `json:"team,omitempty"` + Timestamp *Timestamp `json:"@timestamp,omitempty"` // The time the audit log event occurred, given as a [Unix timestamp](http://en.wikipedia.org/wiki/Unix_time). + TokenID *int64 `json:"token_id,omitempty"` + TokenScopes *string `json:"token_scopes,omitempty"` + Topic *string `json:"topic,omitempty"` + TransportProtocolName *string `json:"transport_protocol_name,omitempty"` // A human readable name for the protocol (for example, HTTP or SSH) used to transfer Git data. + TransportProtocol *int `json:"transport_protocol,omitempty"` // The type of protocol (for example, HTTP=1 or SSH=2) used to transfer Git data. + TriggerID *int64 `json:"trigger_id,omitempty"` + User *string `json:"user,omitempty"` // The user that was affected by the action performed (if available). + UserAgent *string `json:"user_agent,omitempty"` + Visibility *string `json:"visibility,omitempty"` // The repository visibility, for example `public` or `private`. + WorkflowID *int64 `json:"workflow_id,omitempty"` + WorkflowRunID *int64 `json:"workflow_run_id,omitempty"` +} + +// GetAuditLog gets the audit-log entries for an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization +func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) { + u := fmt.Sprintf("orgs/%v/audit-log", org) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var auditEntries []*AuditEntry + resp, err := s.client.Do(ctx, req, &auditEntries) + if err != nil { + return nil, resp, err + } + + return auditEntries, resp, nil +} diff --git a/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go b/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go new file mode 100644 index 0000000000..7c1b2d6292 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/orgs_custom_roles.go @@ -0,0 +1,120 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" +) + +// OrganizationCustomRepoRoles represents custom repository roles available in specified organization. +type OrganizationCustomRepoRoles struct { + TotalCount *int `json:"total_count,omitempty"` + CustomRepoRoles []*CustomRepoRoles `json:"custom_roles,omitempty"` +} + +// CustomRepoRoles represents custom repository roles for an organization. +// See https://docs.github.com/en/enterprise-cloud@latest/organizations/managing-peoples-access-to-your-organization-with-roles/managing-custom-repository-roles-for-an-organization +// for more information. +type CustomRepoRoles struct { + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + BaseRole *string `json:"base_role,omitempty"` + Permissions []string `json:"permissions,omitempty"` +} + +// ListCustomRepoRoles lists the custom repository roles available in this organization. +// In order to see custom repository roles in an organization, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#list-custom-repository-roles-in-an-organization +func (s *OrganizationsService) ListCustomRepoRoles(ctx context.Context, org string) (*OrganizationCustomRepoRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/custom-repository-roles", org) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + customRepoRoles := new(OrganizationCustomRepoRoles) + resp, err := s.client.Do(ctx, req, customRepoRoles) + if err != nil { + return nil, resp, err + } + + return customRepoRoles, resp, nil +} + +// CreateOrUpdateCustomRoleOptions represents options required to create or update a custom repository role. +type CreateOrUpdateCustomRoleOptions struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + BaseRole *string `json:"base_role,omitempty"` + Permissions []string `json:"permissions,omitempty"` +} + +// CreateCustomRepoRole creates a custom repository role in this organization. +// In order to create custom repository roles in an organization, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#create-a-custom-repository-role +func (s *OrganizationsService) CreateCustomRepoRole(ctx context.Context, org string, opts *CreateOrUpdateCustomRoleOptions) (*CustomRepoRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/custom-repository-roles", org) + + req, err := s.client.NewRequest("POST", u, opts) + if err != nil { + return nil, nil, err + } + + resultingRole := new(CustomRepoRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return nil, resp, err + } + + return resultingRole, resp, err +} + +// UpdateCustomRepoRole updates a custom repository role in this organization. +// In order to update custom repository roles in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#update-a-custom-repository-role +func (s *OrganizationsService) UpdateCustomRepoRole(ctx context.Context, org, roleID string, opts *CreateOrUpdateCustomRoleOptions) (*CustomRepoRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/custom-repository-roles/%v", org, roleID) + + req, err := s.client.NewRequest("PATCH", u, opts) + if err != nil { + return nil, nil, err + } + + resultingRole := new(CustomRepoRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return nil, resp, err + } + + return resultingRole, resp, err +} + +// DeleteCustomRepoRole deletes an existing custom repository role in this organization. +// In order to delete custom repository roles in an organization, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#delete-a-custom-repository-role +func (s *OrganizationsService) DeleteCustomRepoRole(ctx context.Context, org, roleID string) (*Response, error) { + u := fmt.Sprintf("orgs/%v/custom-repository-roles/%v", org, roleID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resultingRole := new(CustomRepoRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/orgs_hooks.go b/vendor/github.com/google/go-github/v53/github/orgs_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_hooks.go rename to vendor/github.com/google/go-github/v53/github/orgs_hooks.go diff --git a/vendor/github.com/google/go-github/v45/github/orgs_hooks_deliveries.go b/vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_hooks_deliveries.go rename to vendor/github.com/google/go-github/v53/github/orgs_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v45/github/orgs_members.go b/vendor/github.com/google/go-github/v53/github/orgs_members.go similarity index 99% rename from vendor/github.com/google/go-github/v45/github/orgs_members.go rename to vendor/github.com/google/go-github/v53/github/orgs_members.go index 38f43bad5a..79f8a65333 100644 --- a/vendor/github.com/google/go-github/v45/github/orgs_members.go +++ b/vendor/github.com/google/go-github/v53/github/orgs_members.go @@ -315,8 +315,8 @@ type CreateOrgInvitationOptions struct { // * billing_manager - Non-owner organization members with ability to // manage the billing settings of your organization. // Default is "direct_member". - Role *string `json:"role"` - TeamID []int64 `json:"team_ids"` + Role *string `json:"role,omitempty"` + TeamID []int64 `json:"team_ids,omitempty"` } // CreateOrgInvitation invites people to an organization by using their GitHub user ID or their email address. 
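The orgs_audit_log.go file added above is new to the vendored tree in this bump. A brief usage sketch built only from the types and the GetAuditLog signature shown there; the organization name and the search phrase are placeholders:

package example

import (
	"context"

	"github.com/google/go-github/v53/github"
)

// auditRepoCreations is a hypothetical helper: it queries the organization
// audit log for repo.create events, newest first.
func auditRepoCreations(ctx context.Context, client *github.Client, org string) ([]*github.AuditEntry, error) {
	opts := &github.GetAuditLogOptions{
		Phrase: github.String("action:repo.create"),
		Order:  github.String("desc"),
	}
	entries, _, err := client.Organizations.GetAuditLog(ctx, org, opts)
	return entries, err
}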
diff --git a/vendor/github.com/google/go-github/v45/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_outside_collaborators.go rename to vendor/github.com/google/go-github/v53/github/orgs_outside_collaborators.go diff --git a/vendor/github.com/google/go-github/v45/github/orgs_packages.go b/vendor/github.com/google/go-github/v53/github/orgs_packages.go similarity index 97% rename from vendor/github.com/google/go-github/v45/github/orgs_packages.go rename to vendor/github.com/google/go-github/v53/github/orgs_packages.go index 9fb11308b8..0ae68aaa36 100644 --- a/vendor/github.com/google/go-github/v45/github/orgs_packages.go +++ b/vendor/github.com/google/go-github/v53/github/orgs_packages.go @@ -81,7 +81,7 @@ func (s *OrganizationsService) RestorePackage(ctx context.Context, org, packageT // Get all versions of a package in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/packages#get-all-package-versions-for-a-package-owned-by-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#list-package-versions-for-a-package-owned-by-an-organization func (s *OrganizationsService) PackageGetAllVersions(ctx context.Context, org, packageType, packageName string, opts *PackageListOptions) ([]*PackageVersion, *Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions", org, packageType, packageName) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v45/github/orgs_projects.go b/vendor/github.com/google/go-github/v53/github/orgs_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_projects.go rename to vendor/github.com/google/go-github/v53/github/orgs_projects.go diff --git a/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go b/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go new file mode 100644 index 0000000000..a3f002e0e1 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/orgs_security_managers.go @@ -0,0 +1,57 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// ListSecurityManagerTeams lists all security manager teams for an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/security-managers#list-security-manager-teams +func (s *OrganizationsService) ListSecurityManagerTeams(ctx context.Context, org string) ([]*Team, *Response, error) { + u := fmt.Sprintf("orgs/%v/security-managers", org) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var teams []*Team + resp, err := s.client.Do(ctx, req, &teams) + if err != nil { + return nil, resp, err + } + + return teams, resp, nil +} + +// AddSecurityManagerTeam adds a team to the list of security managers for an organization. 
+// +// GitHub API docs: https://docs.github.com/en/rest/orgs/security-managers#add-a-security-manager-team +func (s *OrganizationsService) AddSecurityManagerTeam(ctx context.Context, org, team string) (*Response, error) { + u := fmt.Sprintf("orgs/%v/security-managers/teams/%v", org, team) + req, err := s.client.NewRequest("PUT", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// RemoveSecurityManagerTeam removes a team from the list of security managers for an organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/security-managers#remove-a-security-manager-team +func (s *OrganizationsService) RemoveSecurityManagerTeam(ctx context.Context, org, team string) (*Response, error) { + u := fmt.Sprintf("orgs/%v/security-managers/teams/%v", org, team) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v45/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/orgs_users_blocking.go rename to vendor/github.com/google/go-github/v53/github/orgs_users_blocking.go diff --git a/vendor/github.com/google/go-github/v45/github/packages.go b/vendor/github.com/google/go-github/v53/github/packages.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/packages.go rename to vendor/github.com/google/go-github/v53/github/packages.go diff --git a/vendor/github.com/google/go-github/v45/github/projects.go b/vendor/github.com/google/go-github/v53/github/projects.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/projects.go rename to vendor/github.com/google/go-github/v53/github/projects.go diff --git a/vendor/github.com/google/go-github/v45/github/pulls.go b/vendor/github.com/google/go-github/v53/github/pulls.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/pulls.go rename to vendor/github.com/google/go-github/v53/github/pulls.go index 120a1d6f18..6e49eba2f9 100644 --- a/vendor/github.com/google/go-github/v45/github/pulls.go +++ b/vendor/github.com/google/go-github/v53/github/pulls.go @@ -9,7 +9,6 @@ import ( "bytes" "context" "fmt" - "time" ) // PullRequestsService handles communication with the pull request related @@ -34,10 +33,10 @@ type PullRequest struct { Locked *bool `json:"locked,omitempty"` Title *string `json:"title,omitempty"` Body *string `json:"body,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` - ClosedAt *time.Time `json:"closed_at,omitempty"` - MergedAt *time.Time `json:"merged_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + ClosedAt *Timestamp `json:"closed_at,omitempty"` + MergedAt *Timestamp `json:"merged_at,omitempty"` Labels []*Label `json:"labels,omitempty"` User *User `json:"user,omitempty"` Draft *bool `json:"draft,omitempty"` diff --git a/vendor/github.com/google/go-github/v45/github/pulls_comments.go b/vendor/github.com/google/go-github/v53/github/pulls_comments.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/pulls_comments.go rename to vendor/github.com/google/go-github/v53/github/pulls_comments.go index 83e7881e51..1f6b726d85 100644 --- a/vendor/github.com/google/go-github/v45/github/pulls_comments.go 
+++ b/vendor/github.com/google/go-github/v53/github/pulls_comments.go @@ -33,8 +33,8 @@ type PullRequestComment struct { OriginalCommitID *string `json:"original_commit_id,omitempty"` User *User `json:"user,omitempty"` Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` // AuthorAssociation is the comment author's relationship to the pull request's repository. // Possible values are "COLLABORATOR", "CONTRIBUTOR", "FIRST_TIMER", "FIRST_TIME_CONTRIBUTOR", "MEMBER", "OWNER", or "NONE". AuthorAssociation *string `json:"author_association,omitempty"` diff --git a/vendor/github.com/google/go-github/v45/github/pulls_reviewers.go b/vendor/github.com/google/go-github/v53/github/pulls_reviewers.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/pulls_reviewers.go rename to vendor/github.com/google/go-github/v53/github/pulls_reviewers.go diff --git a/vendor/github.com/google/go-github/v45/github/pulls_reviews.go b/vendor/github.com/google/go-github/v53/github/pulls_reviews.go similarity index 94% rename from vendor/github.com/google/go-github/v45/github/pulls_reviews.go rename to vendor/github.com/google/go-github/v53/github/pulls_reviews.go index 14e20322ae..addcce4683 100644 --- a/vendor/github.com/google/go-github/v45/github/pulls_reviews.go +++ b/vendor/github.com/google/go-github/v53/github/pulls_reviews.go @@ -9,7 +9,6 @@ import ( "context" "errors" "fmt" - "time" ) var ErrMixedCommentStyles = errors.New("cannot use both position and side/line form comments") @@ -20,7 +19,7 @@ type PullRequestReview struct { NodeID *string `json:"node_id,omitempty"` User *User `json:"user,omitempty"` Body *string `json:"body,omitempty"` - SubmittedAt *time.Time `json:"submitted_at,omitempty"` + SubmittedAt *Timestamp `json:"submitted_at,omitempty"` CommitID *string `json:"commit_id,omitempty"` HTMLURL *string `json:"html_url,omitempty"` PullRequestURL *string `json:"pull_request_url,omitempty"` @@ -193,35 +192,37 @@ func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, rep // // In order to use multi-line comments, you must use the "comfort fade" preview. // This replaces the use of the "Position" field in comments with 4 new fields: -// [Start]Side, and [Start]Line. +// +// [Start]Side, and [Start]Line. +// // These new fields must be used for ALL comments (including single-line), // with the following restrictions (empirically observed, so subject to change). // // For single-line "comfort fade" comments, you must use: // -// Path: &path, // as before -// Body: &body, // as before -// Side: &"RIGHT" (or "LEFT") -// Line: &123, // NOT THE SAME AS POSITION, this is an actual line number. +// Path: &path, // as before +// Body: &body, // as before +// Side: &"RIGHT" (or "LEFT") +// Line: &123, // NOT THE SAME AS POSITION, this is an actual line number. // // If StartSide or StartLine is used with single-line comments, a 422 is returned. 
// // For multi-line "comfort fade" comments, you must use: // -// Path: &path, // as before -// Body: &body, // as before -// StartSide: &"RIGHT" (or "LEFT") -// Side: &"RIGHT" (or "LEFT") -// StartLine: &120, -// Line: &125, +// Path: &path, // as before +// Body: &body, // as before +// StartSide: &"RIGHT" (or "LEFT") +// Side: &"RIGHT" (or "LEFT") +// StartLine: &120, +// Line: &125, // // Suggested edits are made by commenting on the lines to replace, and including the // suggested edit in a block like this (it may be surrounded in non-suggestion markdown): // -// ```suggestion -// Use this instead. -// It is waaaaaay better. -// ``` +// ```suggestion +// Use this instead. +// It is waaaaaay better. +// ``` func (s *PullRequestsService) CreateReview(ctx context.Context, owner, repo string, number int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number) diff --git a/vendor/github.com/google/go-github/v45/github/pulls_threads.go b/vendor/github.com/google/go-github/v53/github/pulls_threads.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/pulls_threads.go rename to vendor/github.com/google/go-github/v53/github/pulls_threads.go diff --git a/vendor/github.com/google/go-github/v45/github/reactions.go b/vendor/github.com/google/go-github/v53/github/reactions.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/reactions.go rename to vendor/github.com/google/go-github/v53/github/reactions.go diff --git a/vendor/github.com/google/go-github/v45/github/repos.go b/vendor/github.com/google/go-github/v53/github/repos.go similarity index 76% rename from vendor/github.com/google/go-github/v45/github/repos.go rename to vendor/github.com/google/go-github/v53/github/repos.go index a5f7fc6cf8..5ffad6dd3c 100644 --- a/vendor/github.com/google/go-github/v45/github/repos.go +++ b/vendor/github.com/google/go-github/v53/github/repos.go @@ -68,8 +68,13 @@ type Repository struct { AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` AllowForking *bool `json:"allow_forking,omitempty"` + WebCommitSignoffRequired *bool `json:"web_commit_signoff_required,omitempty"` DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` + SquashMergeCommitTitle *string `json:"squash_merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "COMMIT_OR_PR_TITLE" + SquashMergeCommitMessage *string `json:"squash_merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "COMMIT_MESSAGES", "BLANK" + MergeCommitTitle *string `json:"merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "MERGE_MESSAGE" + MergeCommitMessage *string `json:"merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "PR_TITLE", "BLANK" Topics []string `json:"topics,omitempty"` Archived *bool `json:"archived,omitempty"` Disabled *bool `json:"disabled,omitempty"` @@ -84,6 +89,7 @@ type Repository struct { HasPages *bool `json:"has_pages,omitempty"` HasProjects *bool `json:"has_projects,omitempty"` HasDownloads *bool `json:"has_downloads,omitempty"` + HasDiscussions *bool `json:"has_discussions,omitempty"` IsTemplate *bool `json:"is_template,omitempty"` LicenseTemplate *string `json:"license_template,omitempty"` GitignoreTemplate *string `json:"gitignore_template,omitempty"` @@ -201,8 +207,9 @@ type RepositoryListOptions struct { // 
SecurityAndAnalysis specifies the optional advanced security features // that are enabled on a given repository. type SecurityAndAnalysis struct { - AdvancedSecurity *AdvancedSecurity `json:"advanced_security,omitempty"` - SecretScanning *SecretScanning `json:"secret_scanning,omitempty"` + AdvancedSecurity *AdvancedSecurity `json:"advanced_security,omitempty"` + SecretScanning *SecretScanning `json:"secret_scanning,omitempty"` + SecretScanningPushProtection *SecretScanningPushProtection `json:"secret_scanning_push_protection,omitempty"` } func (s SecurityAndAnalysis) String() string { @@ -231,6 +238,13 @@ func (s SecretScanning) String() string { return Stringify(s) } +// SecretScanningPushProtection specifies the state of secret scanning push protection on a repository. +// +// GitHub API docs: https://docs.github.com/en/code-security/secret-scanning/about-secret-scanning#about-secret-scanning-for-partner-patterns +type SecretScanningPushProtection struct { + Status *string `json:"status,omitempty"` +} + // List the repositories for a user. Passing the empty string will list // repositories for the authenticated user. // @@ -353,12 +367,13 @@ type createRepoRequest struct { Description *string `json:"description,omitempty"` Homepage *string `json:"homepage,omitempty"` - Private *bool `json:"private,omitempty"` - Visibility *string `json:"visibility,omitempty"` - HasIssues *bool `json:"has_issues,omitempty"` - HasProjects *bool `json:"has_projects,omitempty"` - HasWiki *bool `json:"has_wiki,omitempty"` - IsTemplate *bool `json:"is_template,omitempty"` + Private *bool `json:"private,omitempty"` + Visibility *string `json:"visibility,omitempty"` + HasIssues *bool `json:"has_issues,omitempty"` + HasProjects *bool `json:"has_projects,omitempty"` + HasWiki *bool `json:"has_wiki,omitempty"` + HasDiscussions *bool `json:"has_discussions,omitempty"` + IsTemplate *bool `json:"is_template,omitempty"` // Creating an organization repository. Required for non-owners. TeamID *int64 `json:"team_id,omitempty"` @@ -374,6 +389,10 @@ type createRepoRequest struct { AllowForking *bool `json:"allow_forking,omitempty"` DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` + SquashMergeCommitTitle *string `json:"squash_merge_commit_title,omitempty"` + SquashMergeCommitMessage *string `json:"squash_merge_commit_message,omitempty"` + MergeCommitTitle *string `json:"merge_commit_title,omitempty"` + MergeCommitMessage *string `json:"merge_commit_message,omitempty"` } // Create a new repository. 
If an organization is specified, the new @@ -407,6 +426,7 @@ func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repo HasIssues: repo.HasIssues, HasProjects: repo.HasProjects, HasWiki: repo.HasWiki, + HasDiscussions: repo.HasDiscussions, IsTemplate: repo.IsTemplate, TeamID: repo.TeamID, AutoInit: repo.AutoInit, @@ -420,6 +440,10 @@ func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repo AllowForking: repo.AllowForking, DeleteBranchOnMerge: repo.DeleteBranchOnMerge, UseSquashPRTitleAsDefault: repo.UseSquashPRTitleAsDefault, + SquashMergeCommitTitle: repo.SquashMergeCommitTitle, + SquashMergeCommitMessage: repo.SquashMergeCommitMessage, + MergeCommitTitle: repo.MergeCommitTitle, + MergeCommitMessage: repo.MergeCommitMessage, } req, err := s.client.NewRequest("POST", u, repoReq) @@ -472,7 +496,7 @@ func (s *RepositoriesService) CreateFromTemplate(ctx context.Context, templateOw // Get fetches a repository. // -// GitHub API docs: https://docs.github.com/en/rest/repos/repos#update-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-a-repository func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -725,10 +749,10 @@ func (s *RepositoriesService) ListContributors(ctx context.Context, owner string // specifies the languages and the number of bytes of code written in that // language. For example: // -// { -// "C": 78769, -// "Python": 7769 -// } +// { +// "C": 78769, +// "Python": 7769 +// } // // GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-languages func (s *RepositoriesService) ListLanguages(ctx context.Context, owner string, repo string) (map[string]int, *Response, error) { @@ -820,6 +844,28 @@ type Protection struct { AllowForcePushes *AllowForcePushes `json:"allow_force_pushes"` AllowDeletions *AllowDeletions `json:"allow_deletions"` RequiredConversationResolution *RequiredConversationResolution `json:"required_conversation_resolution"` + BlockCreations *BlockCreations `json:"block_creations,omitempty"` + LockBranch *LockBranch `json:"lock_branch,omitempty"` + AllowForkSyncing *AllowForkSyncing `json:"allow_fork_syncing,omitempty"` + RequiredSignatures *SignaturesProtectedBranch `json:"required_signatures,omitempty"` + URL *string `json:"url,omitempty"` +} + +// BlockCreations represents whether users can push changes that create branches. If this is true, this +// setting blocks pushes that create new branches, unless the push is initiated by a user, team, or app +// which has the ability to push. +type BlockCreations struct { + Enabled *bool `json:"enabled,omitempty"` +} + +// LockBranch represents if the branch is marked as read-only. If this is true, users will not be able to push to the branch. +type LockBranch struct { + Enabled *bool `json:"enabled,omitempty"` +} + +// AllowForkSyncing represents whether users can pull changes from upstream when the branch is locked. +type AllowForkSyncing struct { + Enabled *bool `json:"enabled,omitempty"` } // BranchProtectionRule represents the rule applied to a repositories branch. @@ -852,8 +898,31 @@ type BranchProtectionRule struct { // ProtectionChanges represents the changes to the rule if the BranchProtection was edited. 
type ProtectionChanges struct { - AuthorizedActorsOnly *AuthorizedActorsOnly `json:"authorized_actors_only,omitempty"` - AuthorizedActorNames *AuthorizedActorNames `json:"authorized_actor_names,omitempty"` + AdminEnforced *AdminEnforcedChanges `json:"admin_enforced,omitempty"` + AllowDeletionsEnforcementLevel *AllowDeletionsEnforcementLevelChanges `json:"allow_deletions_enforcement_level,omitempty"` + AuthorizedActorNames *AuthorizedActorNames `json:"authorized_actor_names,omitempty"` + AuthorizedActorsOnly *AuthorizedActorsOnly `json:"authorized_actors_only,omitempty"` + AuthorizedDismissalActorsOnly *AuthorizedDismissalActorsOnlyChanges `json:"authorized_dismissal_actors_only,omitempty"` + CreateProtected *CreateProtectedChanges `json:"create_protected,omitempty"` + DismissStaleReviewsOnPush *DismissStaleReviewsOnPushChanges `json:"dismiss_stale_reviews_on_push,omitempty"` + LinearHistoryRequirementEnforcementLevel *LinearHistoryRequirementEnforcementLevelChanges `json:"linear_history_requirement_enforcement_level,omitempty"` + PullRequestReviewsEnforcementLevel *PullRequestReviewsEnforcementLevelChanges `json:"pull_request_reviews_enforcement_level,omitempty"` + RequireCodeOwnerReview *RequireCodeOwnerReviewChanges `json:"require_code_owner_review,omitempty"` + RequiredConversationResolutionLevel *RequiredConversationResolutionLevelChanges `json:"required_conversation_resolution_level,omitempty"` + RequiredDeploymentsEnforcementLevel *RequiredDeploymentsEnforcementLevelChanges `json:"required_deployments_enforcement_level,omitempty"` + RequiredStatusChecks *RequiredStatusChecksChanges `json:"required_status_checks,omitempty"` + RequiredStatusChecksEnforcementLevel *RequiredStatusChecksEnforcementLevelChanges `json:"required_status_checks_enforcement_level,omitempty"` + SignatureRequirementEnforcementLevel *SignatureRequirementEnforcementLevelChanges `json:"signature_requirement_enforcement_level,omitempty"` +} + +// AdminEnforcedChanges represents the changes made to the AdminEnforced policy. +type AdminEnforcedChanges struct { + From *bool `json:"from,omitempty"` +} + +// AllowDeletionsEnforcementLevelChanges represents the changes made to the AllowDeletionsEnforcementLevel policy. +type AllowDeletionsEnforcementLevelChanges struct { + From *string `json:"from,omitempty"` } // AuthorizedActorNames represents who are authorized to edit the branch protection rules. @@ -861,11 +930,66 @@ type AuthorizedActorNames struct { From []string `json:"from,omitempty"` } -// AuthorizedActorsOnly represents if the branche rule can be edited by authorized actors only. +// AuthorizedActorsOnly represents if the branch rule can be edited by authorized actors only. type AuthorizedActorsOnly struct { From *bool `json:"from,omitempty"` } +// AuthorizedDismissalActorsOnlyChanges represents the changes made to the AuthorizedDismissalActorsOnly policy. +type AuthorizedDismissalActorsOnlyChanges struct { + From *bool `json:"from,omitempty"` +} + +// CreateProtectedChanges represents the changes made to the CreateProtected policy. +type CreateProtectedChanges struct { + From *bool `json:"from,omitempty"` +} + +// DismissStaleReviewsOnPushChanges represents the changes made to the DismissStaleReviewsOnPushChanges policy. +type DismissStaleReviewsOnPushChanges struct { + From *bool `json:"from,omitempty"` +} + +// LinearHistoryRequirementEnforcementLevelChanges represents the changes made to the LinearHistoryRequirementEnforcementLevel policy. 
+type LinearHistoryRequirementEnforcementLevelChanges struct { + From *string `json:"from,omitempty"` +} + +// PullRequestReviewsEnforcementLevelChanges represents the changes made to the PullRequestReviewsEnforcementLevel policy. +type PullRequestReviewsEnforcementLevelChanges struct { + From *string `json:"from,omitempty"` +} + +// RequireCodeOwnerReviewChanges represents the changes made to the RequireCodeOwnerReview policy. +type RequireCodeOwnerReviewChanges struct { + From *bool `json:"from,omitempty"` +} + +// RequiredConversationResolutionLevelChanges represents the changes made to the RequiredConversationResolutionLevel policy. +type RequiredConversationResolutionLevelChanges struct { + From *string `json:"from,omitempty"` +} + +// RequiredDeploymentsEnforcementLevelChanges represents the changes made to the RequiredDeploymentsEnforcementLevel policy. +type RequiredDeploymentsEnforcementLevelChanges struct { + From *string `json:"from,omitempty"` +} + +// RequiredStatusChecksChanges represents the changes made to the RequiredStatusChecks policy. +type RequiredStatusChecksChanges struct { + From []string `json:"from,omitempty"` +} + +// RequiredStatusChecksEnforcementLevelChanges represents the changes made to the RequiredStatusChecksEnforcementLevel policy. +type RequiredStatusChecksEnforcementLevelChanges struct { + From *string `json:"from,omitempty"` +} + +// SignatureRequirementEnforcementLevelChanges represents the changes made to the SignatureRequirementEnforcementLevel policy. +type SignatureRequirementEnforcementLevelChanges struct { + From *string `json:"from,omitempty"` +} + // ProtectionRequest represents a request to create/edit a branch's protection. type ProtectionRequest struct { RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"` @@ -881,6 +1005,14 @@ type ProtectionRequest struct { // RequiredConversationResolution, if set to true, requires all comments // on the pull request to be resolved before it can be merged to a protected branch. RequiredConversationResolution *bool `json:"required_conversation_resolution,omitempty"` + // BlockCreations, if set to true, will cause the restrictions setting to also block pushes + // which create new branches, unless initiated by a user, team, app with the ability to push. + BlockCreations *bool `json:"block_creations,omitempty"` + // LockBranch, if set to true, will prevent users from pushing to the branch. + LockBranch *bool `json:"lock_branch,omitempty"` + // AllowForkSyncing, if set to true, will allow users to pull changes from upstream + // when the branch is locked. + AllowForkSyncing *bool `json:"allow_fork_syncing,omitempty"` } // RequiredStatusChecks represents the protection status of a individual branch. @@ -893,7 +1025,9 @@ type RequiredStatusChecks struct { Contexts []string `json:"contexts,omitempty"` // The list of status checks to require in order to merge into this // branch. - Checks []*RequiredStatusCheck `json:"checks,omitempty"` + Checks []*RequiredStatusCheck `json:"checks"` + ContextsURL *string `json:"contexts_url,omitempty"` + URL *string `json:"url,omitempty"` } // RequiredStatusChecksRequest represents a request to edit a protected branch's status checks. @@ -919,7 +1053,9 @@ type RequiredStatusCheck struct { // PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch. type PullRequestReviewsEnforcement struct { - // Specifies which users and teams can dismiss pull request reviews. 
+ // Allow specific users, teams, or apps to bypass pull request requirements. + BypassPullRequestAllowances *BypassPullRequestAllowances `json:"bypass_pull_request_allowances,omitempty"` + // Specifies which users, teams and apps can dismiss pull request reviews. DismissalRestrictions *DismissalRestrictions `json:"dismissal_restrictions,omitempty"` // Specifies if approved reviews are dismissed automatically, when a new commit is pushed. DismissStaleReviews bool `json:"dismiss_stale_reviews"` @@ -928,14 +1064,18 @@ type PullRequestReviewsEnforcement struct { // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. // Valid values are 1-6. RequiredApprovingReviewCount int `json:"required_approving_review_count"` + // RequireLastPushApproval specifies whether the last pusher to a pull request branch can approve it. + RequireLastPushApproval bool `json:"require_last_push_approval"` } // PullRequestReviewsEnforcementRequest represents request to set the pull request review // enforcement of a protected branch. It is separate from PullRequestReviewsEnforcement above // because the request structure is different from the response structure. type PullRequestReviewsEnforcementRequest struct { - // Specifies which users and teams should be allowed to dismiss pull request reviews. - // User and team dismissal restrictions are only available for + // Allow specific users, teams, or apps to bypass pull request requirements. + BypassPullRequestAllowancesRequest *BypassPullRequestAllowancesRequest `json:"bypass_pull_request_allowances,omitempty"` + // Specifies which users, teams and apps should be allowed to dismiss pull request reviews. + // User, team and app dismissal restrictions are only available for // organization-owned repositories. Must be nil for personal repositories. DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. (Required) @@ -945,13 +1085,17 @@ type PullRequestReviewsEnforcementRequest struct { // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. // Valid values are 1-6. RequiredApprovingReviewCount int `json:"required_approving_review_count"` + // RequireLastPushApproval specifies whether the last pusher to a pull request branch can approve it. + RequireLastPushApproval *bool `json:"require_last_push_approval,omitempty"` } // PullRequestReviewsEnforcementUpdate represents request to patch the pull request review // enforcement of a protected branch. It is separate from PullRequestReviewsEnforcementRequest above // because the patch request does not require all fields to be initialized. type PullRequestReviewsEnforcementUpdate struct { - // Specifies which users and teams can dismiss pull request reviews. Can be omitted. + // Allow specific users, teams, or apps to bypass pull request requirements. + BypassPullRequestAllowancesRequest *BypassPullRequestAllowancesRequest `json:"bypass_pull_request_allowances,omitempty"` + // Specifies which users, teams and apps can dismiss pull request reviews. Can be omitted. DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be omitted. 
DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"` @@ -960,6 +1104,8 @@ type PullRequestReviewsEnforcementUpdate struct { // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. // Valid values are 1 - 6 or 0 to not require reviewers. RequiredApprovingReviewCount int `json:"required_approving_review_count"` + // RequireLastPushApproval specifies whether the last pusher to a pull request branch can approve it. + RequireLastPushApproval *bool `json:"require_last_push_approval,omitempty"` } // RequireLinearHistory represents the configuration to enforce branches with no merge commit. @@ -1009,7 +1155,30 @@ type BranchRestrictionsRequest struct { // The list of team slugs with push access. (Required; use []string{} instead of nil for empty list.) Teams []string `json:"teams"` // The list of app slugs with push access. - Apps []string `json:"apps,omitempty"` + Apps []string `json:"apps"` +} + +// BypassPullRequestAllowances represents the people, teams, or apps who are allowed to bypass required pull requests. +type BypassPullRequestAllowances struct { + // The list of users allowed to bypass pull request requirements. + Users []*User `json:"users"` + // The list of teams allowed to bypass pull request requirements. + Teams []*Team `json:"teams"` + // The list of apps allowed to bypass pull request requirements. + Apps []*App `json:"apps"` +} + +// BypassPullRequestAllowancesRequest represents the people, teams, or apps who are +// allowed to bypass required pull requests. +// It is separate from BypassPullRequestAllowances above because the request structure is +// different from the response structure. +type BypassPullRequestAllowancesRequest struct { + // The list of user logins allowed to bypass pull request requirements. + Users []string `json:"users"` + // The list of team slugs allowed to bypass pull request requirements. + Teams []string `json:"teams"` + // The list of app slugs allowed to bypass pull request requirements. + Apps []string `json:"apps"` } // DismissalRestrictions specifies which users and teams can dismiss pull request reviews. @@ -1018,10 +1187,12 @@ type DismissalRestrictions struct { Users []*User `json:"users"` // The list of teams which can dismiss pull request reviews. Teams []*Team `json:"teams"` + // The list of apps which can dismiss pull request reviews. + Apps []*App `json:"apps"` } // DismissalRestrictionsRequest represents the request to create/edit the -// restriction to allows only specific users or teams to dimiss pull request reviews. It is +// restriction to allows only specific users, teams or apps to dimiss pull request reviews. It is // separate from DismissalRestrictions above because the request structure is // different from the response structure. // Note: Both Users and Teams must be nil, or both must be non-nil. @@ -1030,6 +1201,8 @@ type DismissalRestrictionsRequest struct { Users *[]string `json:"users,omitempty"` // The list of team slugs which can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.) Teams *[]string `json:"teams,omitempty"` + // The list of app slugs which can dismiss pull request reviews. (Required; use nil to disable dismissal_restrictions or &[]string{} otherwise.) + Apps *[]string `json:"apps,omitempty"` } // SignaturesProtectedBranch represents the protection status of an individual branch. 
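The branch-protection request and response types above gain several fields in this bump (BypassPullRequestAllowances, RequireLastPushApproval, BlockCreations, LockBranch, AllowForkSyncing, app dismissal restrictions). A minimal sketch of how a caller might exercise some of them through RepositoriesService.UpdateBranchProtection follows; the owner, repo, branch and user names are placeholders, and the client is assumed to be authenticated elsewhere.

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication is assumed to be configured elsewhere

	// Fields added in this version: BypassPullRequestAllowancesRequest,
	// RequireLastPushApproval and LockBranch.
	preq := &github.ProtectionRequest{
		RequiredPullRequestReviews: &github.PullRequestReviewsEnforcementRequest{
			BypassPullRequestAllowancesRequest: &github.BypassPullRequestAllowancesRequest{
				Users: []string{"release-bot"}, // placeholder login
				Teams: []string{},
				Apps:  []string{},
			},
			DismissStaleReviews:          true,
			RequiredApprovingReviewCount: 2,
			RequireLastPushApproval:      github.Bool(true),
		},
		EnforceAdmins: true,
		LockBranch:    github.Bool(false),
	}

	_, _, err := client.Repositories.UpdateBranchProtection(ctx, "example-org", "example-repo", "main", preq)
	if err != nil {
		fmt.Println("update branch protection:", err)
		return
	}
	fmt.Println("branch protection updated")
}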
@@ -1524,6 +1697,8 @@ func (s *RepositoriesService) ReplaceAllTopics(ctx context.Context, owner, repo // It requires the GitHub apps to have `write` access to the `content` permission. // // GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-apps-with-access-to-the-protected-branch +// +// Deprecated: Please use ListAppRestrictions instead. func (s *RepositoriesService) ListApps(ctx context.Context, owner, repo, branch string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1540,6 +1715,16 @@ func (s *RepositoriesService) ListApps(ctx context.Context, owner, repo, branch return apps, resp, nil } +// ListAppRestrictions lists the GitHub apps that have push access to a given protected branch. +// It requires the GitHub apps to have `write` access to the `content` permission. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-apps-with-access-to-the-protected-branch +// +// Note: This is a wrapper around ListApps so a naming convention with ListUserRestrictions and ListTeamRestrictions is preserved. +func (s *RepositoriesService) ListAppRestrictions(ctx context.Context, owner, repo, branch string) ([]*App, *Response, error) { + return s.ListApps(ctx, owner, repo, branch) +} + // ReplaceAppRestrictions replaces the apps that have push access to a given protected branch. // It removes all apps that previously had push access and grants push access to the new list of apps. // It requires the GitHub apps to have `write` access to the `content` permission. @@ -1547,20 +1732,20 @@ func (s *RepositoriesService) ListApps(ctx context.Context, owner, repo, branch // Note: The list of users, apps, and teams in total is limited to 100 items. // // GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-app-access-restrictions -func (s *RepositoriesService) ReplaceAppRestrictions(ctx context.Context, owner, repo, branch string, slug []string) ([]*App, *Response, error) { +func (s *RepositoriesService) ReplaceAppRestrictions(ctx context.Context, owner, repo, branch string, apps []string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("PUT", u, slug) + req, err := s.client.NewRequest("PUT", u, apps) if err != nil { return nil, nil, err } - var apps []*App - resp, err := s.client.Do(ctx, req, &apps) + var newApps []*App + resp, err := s.client.Do(ctx, req, &newApps) if err != nil { return nil, resp, err } - return apps, resp, nil + return newApps, resp, nil } // AddAppRestrictions grants the specified apps push access to a given protected branch. @@ -1569,47 +1754,222 @@ func (s *RepositoriesService) ReplaceAppRestrictions(ctx context.Context, owner, // Note: The list of users, apps, and teams in total is limited to 100 items. 
// // GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-app-access-restrictions -func (s *RepositoriesService) AddAppRestrictions(ctx context.Context, owner, repo, branch string, slug []string) ([]*App, *Response, error) { +func (s *RepositoriesService) AddAppRestrictions(ctx context.Context, owner, repo, branch string, apps []string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("POST", u, slug) + req, err := s.client.NewRequest("POST", u, apps) if err != nil { return nil, nil, err } - var apps []*App - resp, err := s.client.Do(ctx, req, &apps) + var newApps []*App + resp, err := s.client.Do(ctx, req, &newApps) if err != nil { return nil, resp, err } - return apps, resp, nil + return newApps, resp, nil } -// RemoveAppRestrictions removes the ability of an app to push to this branch. +// RemoveAppRestrictions removes the restrictions of an app from pushing to this branch. // It requires the GitHub apps to have `write` access to the `content` permission. // // Note: The list of users, apps, and teams in total is limited to 100 items. // // GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-app-access-restrictions -func (s *RepositoriesService) RemoveAppRestrictions(ctx context.Context, owner, repo, branch string, slug []string) ([]*App, *Response, error) { +func (s *RepositoriesService) RemoveAppRestrictions(ctx context.Context, owner, repo, branch string, apps []string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) - req, err := s.client.NewRequest("DELETE", u, slug) + req, err := s.client.NewRequest("DELETE", u, apps) if err != nil { return nil, nil, err } - var apps []*App - resp, err := s.client.Do(ctx, req, &apps) + var newApps []*App + resp, err := s.client.Do(ctx, req, &newApps) if err != nil { return nil, resp, err } - return apps, resp, nil + return newApps, resp, nil +} + +// ListTeamRestrictions lists the GitHub teams that have push access to a given protected branch. +// It requires the GitHub teams to have `write` access to the `content` permission. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-teams-with-access-to-the-protected-branch +func (s *RepositoriesService) ListTeamRestrictions(ctx context.Context, owner, repo, branch string) ([]*Team, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var teams []*Team + resp, err := s.client.Do(ctx, req, &teams) + if err != nil { + return nil, resp, err + } + + return teams, resp, nil +} + +// ReplaceTeamRestrictions replaces the team that have push access to a given protected branch. +// This removes all teams that previously had push access and grants push access to the new list of teams. +// It requires the GitHub teams to have `write` access to the `content` permission. +// +// Note: The list of users, apps, and teams in total is limited to 100 items. 
+// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-team-access-restrictions +func (s *RepositoriesService) ReplaceTeamRestrictions(ctx context.Context, owner, repo, branch string, teams []string) ([]*Team, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) + req, err := s.client.NewRequest("PUT", u, teams) + if err != nil { + return nil, nil, err + } + + var newTeams []*Team + resp, err := s.client.Do(ctx, req, &newTeams) + if err != nil { + return nil, resp, err + } + + return newTeams, resp, nil +} + +// AddTeamRestrictions grants the specified teams push access to a given protected branch. +// It requires the GitHub teams to have `write` access to the `content` permission. +// +// Note: The list of users, apps, and teams in total is limited to 100 items. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-team-access-restrictions +func (s *RepositoriesService) AddTeamRestrictions(ctx context.Context, owner, repo, branch string, teams []string) ([]*Team, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) + req, err := s.client.NewRequest("POST", u, teams) + if err != nil { + return nil, nil, err + } + + var newTeams []*Team + resp, err := s.client.Do(ctx, req, &newTeams) + if err != nil { + return nil, resp, err + } + + return newTeams, resp, nil +} + +// RemoveTeamRestrictions removes the restrictions of a team from pushing to this branch. +// It requires the GitHub teams to have `write` access to the `content` permission. +// +// Note: The list of users, apps, and teams in total is limited to 100 items. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-team-access-restrictions +func (s *RepositoriesService) RemoveTeamRestrictions(ctx context.Context, owner, repo, branch string, teams []string) ([]*Team, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/teams", owner, repo, branch) + req, err := s.client.NewRequest("DELETE", u, teams) + if err != nil { + return nil, nil, err + } + + var newTeams []*Team + resp, err := s.client.Do(ctx, req, &newTeams) + if err != nil { + return nil, resp, err + } + + return newTeams, resp, nil +} + +// ListUserRestrictions lists the GitHub users that have push access to a given protected branch. +// It requires the GitHub users to have `write` access to the `content` permission. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-users-with-access-to-the-protected-branch +func (s *RepositoriesService) ListUserRestrictions(ctx context.Context, owner, repo, branch string) ([]*User, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var users []*User + resp, err := s.client.Do(ctx, req, &users) + if err != nil { + return nil, resp, err + } + + return users, resp, nil +} + +// ReplaceUserRestrictions replaces the user that have push access to a given protected branch. +// It removes all users that previously had push access and grants push access to the new list of users. +// It requires the GitHub users to have `write` access to the `content` permission. +// +// Note: The list of users, apps, and teams in total is limited to 100 items. 
+// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-team-access-restrictions +func (s *RepositoriesService) ReplaceUserRestrictions(ctx context.Context, owner, repo, branch string, users []string) ([]*User, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch) + req, err := s.client.NewRequest("PUT", u, users) + if err != nil { + return nil, nil, err + } + + var newUsers []*User + resp, err := s.client.Do(ctx, req, &newUsers) + if err != nil { + return nil, resp, err + } + + return newUsers, resp, nil +} + +// AddUserRestrictions grants the specified users push access to a given protected branch. +// It requires the GitHub users to have `write` access to the `content` permission. +// +// Note: The list of users, apps, and teams in total is limited to 100 items. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-team-access-restrictions +func (s *RepositoriesService) AddUserRestrictions(ctx context.Context, owner, repo, branch string, users []string) ([]*User, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch) + req, err := s.client.NewRequest("POST", u, users) + if err != nil { + return nil, nil, err + } + + var newUsers []*User + resp, err := s.client.Do(ctx, req, &newUsers) + if err != nil { + return nil, resp, err + } + + return newUsers, resp, nil +} + +// RemoveUserRestrictions removes the restrictions of a user from pushing to this branch. +// It requires the GitHub users to have `write` access to the `content` permission. +// +// Note: The list of users, apps, and teams in total is limited to 100 items. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-team-access-restrictions +func (s *RepositoriesService) RemoveUserRestrictions(ctx context.Context, owner, repo, branch string, users []string) ([]*User, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/users", owner, repo, branch) + req, err := s.client.NewRequest("DELETE", u, users) + if err != nil { + return nil, nil, err + } + + var newUsers []*User + resp, err := s.client.Do(ctx, req, &newUsers) + if err != nil { + return nil, resp, err + } + + return newUsers, resp, nil } // TransferRequest represents a request to transfer a repository. type TransferRequest struct { NewOwner string `json:"new_owner"` + NewName *string `json:"new_name,omitempty"` TeamID []int64 `json:"team_ids,omitempty"` } diff --git a/vendor/github.com/google/go-github/v53/github/repos_actions_access.go b/vendor/github.com/google/go-github/v53/github/repos_actions_access.go new file mode 100644 index 0000000000..55761eeb7e --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/repos_actions_access.go @@ -0,0 +1,55 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// RepositoryActionsAccessLevel represents the repository actions access level. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-the-level-of-access-for-workflows-outside-of-the-repository +type RepositoryActionsAccessLevel struct { + // AccessLevel specifies the level of access that workflows outside of the repository have + // to actions and reusable workflows within the repository. 
+ // Possible values are: "none", "organization" "enterprise". + AccessLevel *string `json:"access_level,omitempty"` +} + +// GetActionsAccessLevel gets the level of access that workflows outside of the repository have +// to actions and reusable workflows in the repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-the-level-of-access-for-workflows-outside-of-the-repository +func (s *RepositoriesService) GetActionsAccessLevel(ctx context.Context, owner, repo string) (*RepositoryActionsAccessLevel, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/permissions/access", owner, repo) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + raal := new(RepositoryActionsAccessLevel) + resp, err := s.client.Do(ctx, req, raal) + if err != nil { + return nil, resp, err + } + + return raal, resp, nil +} + +// EditActionsAccessLevel sets the level of access that workflows outside of the repository have +// to actions and reusable workflows in the repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-the-level-of-access-for-workflows-outside-of-the-repository +func (s *RepositoriesService) EditActionsAccessLevel(ctx context.Context, owner, repo string, repositoryActionsAccessLevel RepositoryActionsAccessLevel) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/permissions/access", owner, repo) + req, err := s.client.NewRequest("PUT", u, repositoryActionsAccessLevel) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_actions_allowed.go b/vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_actions_allowed.go rename to vendor/github.com/google/go-github/v53/github/repos_actions_allowed.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_actions_permissions.go b/vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_actions_permissions.go rename to vendor/github.com/google/go-github/v53/github/repos_actions_permissions.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_autolinks.go b/vendor/github.com/google/go-github/v53/github/repos_autolinks.go similarity index 89% rename from vendor/github.com/google/go-github/v45/github/repos_autolinks.go rename to vendor/github.com/google/go-github/v53/github/repos_autolinks.go index 8fa916eac2..0d2cec618f 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_autolinks.go +++ b/vendor/github.com/google/go-github/v53/github/repos_autolinks.go @@ -12,15 +12,17 @@ import ( // AutolinkOptions specifies parameters for RepositoriesService.AddAutolink method. type AutolinkOptions struct { - KeyPrefix *string `json:"key_prefix,omitempty"` - URLTemplate *string `json:"url_template,omitempty"` + KeyPrefix *string `json:"key_prefix,omitempty"` + URLTemplate *string `json:"url_template,omitempty"` + IsAlphanumeric *bool `json:"is_alphanumeric,omitempty"` } // Autolink represents autolinks to external resources like JIRA issues and Zendesk tickets. 
type Autolink struct { - ID *int64 `json:"id,omitempty"` - KeyPrefix *string `json:"key_prefix,omitempty"` - URLTemplate *string `json:"url_template,omitempty"` + ID *int64 `json:"id,omitempty"` + KeyPrefix *string `json:"key_prefix,omitempty"` + URLTemplate *string `json:"url_template,omitempty"` + IsAlphanumeric *bool `json:"is_alphanumeric,omitempty"` } // ListAutolinks returns a list of autolinks configured for the given repository. diff --git a/vendor/github.com/google/go-github/v53/github/repos_codeowners.go b/vendor/github.com/google/go-github/v53/github/repos_codeowners.go new file mode 100644 index 0000000000..835d56e164 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/repos_codeowners.go @@ -0,0 +1,46 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// CodeownersErrors represents a list of syntax errors detected in the CODEOWNERS file. +type CodeownersErrors struct { + Errors []*CodeownersError `json:"errors"` +} + +// CodeownersError represents a syntax error detected in the CODEOWNERS file. +type CodeownersError struct { + Line int `json:"line"` + Column int `json:"column"` + Kind string `json:"kind"` + Source string `json:"source"` + Suggestion *string `json:"suggestion,omitempty"` + Message string `json:"message"` + Path string `json:"path"` +} + +// GetCodeownersErrors lists any syntax errors that are detected in the CODEOWNERS file. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-codeowners-errors +func (s *RepositoriesService) GetCodeownersErrors(ctx context.Context, owner, repo string) (*CodeownersErrors, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/codeowners/errors", owner, repo) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + codeownersErrors := &CodeownersErrors{} + resp, err := s.client.Do(ctx, req, codeownersErrors) + if err != nil { + return nil, resp, err + } + + return codeownersErrors, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_collaborators.go b/vendor/github.com/google/go-github/v53/github/repos_collaborators.go similarity index 95% rename from vendor/github.com/google/go-github/v45/github/repos_collaborators.go rename to vendor/github.com/google/go-github/v53/github/repos_collaborators.go index abc4161c3b..c2396872f2 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_collaborators.go +++ b/vendor/github.com/google/go-github/v53/github/repos_collaborators.go @@ -23,6 +23,13 @@ type ListCollaboratorsOptions struct { // Default value is "all". Affiliation string `url:"affiliation,omitempty"` + // Permission specifies how collaborators should be filtered by the permissions they have on the repository. + // Possible values are: + // "pull", "triage", "push", "maintain", "admin" + // + // If not specified, all collaborators will be returned. 
+ Permission string `url:"permission,omitempty"` + ListOptions } diff --git a/vendor/github.com/google/go-github/v45/github/repos_comments.go b/vendor/github.com/google/go-github/v53/github/repos_comments.go similarity index 97% rename from vendor/github.com/google/go-github/v45/github/repos_comments.go rename to vendor/github.com/google/go-github/v53/github/repos_comments.go index 55a88d1f5e..e282374e9e 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_comments.go +++ b/vendor/github.com/google/go-github/v53/github/repos_comments.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // RepositoryComment represents a comment for a commit, file, or line in a repository. @@ -20,8 +19,8 @@ type RepositoryComment struct { CommitID *string `json:"commit_id,omitempty"` User *User `json:"user,omitempty"` Reactions *Reactions `json:"reactions,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` // User-mutable fields Body *string `json:"body"` diff --git a/vendor/github.com/google/go-github/v45/github/repos_commits.go b/vendor/github.com/google/go-github/v53/github/repos_commits.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_commits.go rename to vendor/github.com/google/go-github/v53/github/repos_commits.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_community_health.go b/vendor/github.com/google/go-github/v53/github/repos_community_health.go similarity index 96% rename from vendor/github.com/google/go-github/v45/github/repos_community_health.go rename to vendor/github.com/google/go-github/v53/github/repos_community_health.go index 9de438b625..750ee15827 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_community_health.go +++ b/vendor/github.com/google/go-github/v53/github/repos_community_health.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // Metric represents the different fields for one file in community health files. @@ -38,7 +37,7 @@ type CommunityHealthMetrics struct { Description *string `json:"description"` Documentation *string `json:"documentation"` Files *CommunityHealthFiles `json:"files"` - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt *Timestamp `json:"updated_at"` ContentReportsEnabled *bool `json:"content_reports_enabled"` } diff --git a/vendor/github.com/google/go-github/v45/github/repos_contents.go b/vendor/github.com/google/go-github/v53/github/repos_contents.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/repos_contents.go rename to vendor/github.com/google/go-github/v53/github/repos_contents.go index d6f2dd9d74..be58fd52f6 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_contents.go +++ b/vendor/github.com/google/go-github/v53/github/repos_contents.go @@ -51,7 +51,7 @@ type RepositoryContentResponse struct { // RepositoryContentFileOptions specifies optional parameters for CreateFile, UpdateFile, and DeleteFile. 
type RepositoryContentFileOptions struct { Message *string `json:"message,omitempty"` - Content []byte `json:"content,omitempty"` // unencoded + Content []byte `json:"content"` // unencoded SHA *string `json:"sha,omitempty"` Branch *string `json:"branch,omitempty"` Author *CommitAuthor `json:"author,omitempty"` @@ -317,5 +317,9 @@ func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo st } parsedURL, err := url.Parse(resp.Header.Get("Location")) - return parsedURL, newResponse(resp), err + if err != nil { + return nil, newResponse(resp), err + } + + return parsedURL, newResponse(resp), nil } diff --git a/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go b/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go new file mode 100644 index 0000000000..8c4628b39b --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/repos_deployment_branch_policies.go @@ -0,0 +1,123 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// DeploymentBranchPolicy represents a single deployment branch policy for an environment. +type DeploymentBranchPolicy struct { + Name *string `json:"name,omitempty"` + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` +} + +// DeploymentBranchPolicyResponse represents the slightly different format of response that comes back when you list deployment branch policies. +type DeploymentBranchPolicyResponse struct { + TotalCount *int `json:"total_count,omitempty"` + BranchPolicies []*DeploymentBranchPolicy `json:"branch_policies,omitempty"` +} + +// DeploymentBranchPolicyRequest represents a deployment branch policy request. +type DeploymentBranchPolicyRequest struct { + Name *string `json:"name,omitempty"` +} + +// ListDeploymentBranchPolicies lists the deployment branch policies for an environment. +// +// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#list-deployment-branch-policies +func (s *RepositoriesService) ListDeploymentBranchPolicies(ctx context.Context, owner, repo, environment string) (*DeploymentBranchPolicyResponse, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies", owner, repo, environment) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var list *DeploymentBranchPolicyResponse + resp, err := s.client.Do(ctx, req, &list) + if err != nil { + return nil, resp, err + } + + return list, resp, nil +} + +// GetDeploymentBranchPolicy gets a deployment branch policy for an environment. +// +// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#get-a-deployment-branch-policy +func (s *RepositoriesService) GetDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, branchPolicyID int64) (*DeploymentBranchPolicy, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies/%v", owner, repo, environment, branchPolicyID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var policy *DeploymentBranchPolicy + resp, err := s.client.Do(ctx, req, &policy) + if err != nil { + return nil, resp, err + } + + return policy, resp, nil +} + +// CreateDeploymentBranchPolicy creates a deployment branch policy for an environment. 
+// +// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#create-a-deployment-branch-policy +func (s *RepositoriesService) CreateDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, request *DeploymentBranchPolicyRequest) (*DeploymentBranchPolicy, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies", owner, repo, environment) + + req, err := s.client.NewRequest("POST", u, request) + if err != nil { + return nil, nil, err + } + + var policy *DeploymentBranchPolicy + resp, err := s.client.Do(ctx, req, &policy) + if err != nil { + return nil, resp, err + } + + return policy, resp, nil +} + +// UpdateDeploymentBranchPolicy updates a deployment branch policy for an environment. +// +// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#update-a-deployment-branch-policy +func (s *RepositoriesService) UpdateDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, branchPolicyID int64, request *DeploymentBranchPolicyRequest) (*DeploymentBranchPolicy, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies/%v", owner, repo, environment, branchPolicyID) + + req, err := s.client.NewRequest("PUT", u, request) + if err != nil { + return nil, nil, err + } + + var policy *DeploymentBranchPolicy + resp, err := s.client.Do(ctx, req, &policy) + if err != nil { + return nil, resp, err + } + + return policy, resp, nil +} + +// DeleteDeploymentBranchPolicy deletes a deployment branch policy for an environment. +// +// GitHub API docs: https://docs.github.com/en/rest/deployments/branch-policies#delete-a-deployment-branch-policy +func (s *RepositoriesService) DeleteDeploymentBranchPolicy(ctx context.Context, owner, repo, environment string, branchPolicyID int64) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/environments/%v/deployment-branch-policies/%v", owner, repo, environment, branchPolicyID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_deployments.go b/vendor/github.com/google/go-github/v53/github/repos_deployments.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_deployments.go rename to vendor/github.com/google/go-github/v53/github/repos_deployments.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_environments.go b/vendor/github.com/google/go-github/v53/github/repos_environments.go similarity index 77% rename from vendor/github.com/google/go-github/v45/github/repos_environments.go rename to vendor/github.com/google/go-github/v53/github/repos_environments.go index 365f8d9202..2399a42c74 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_environments.go +++ b/vendor/github.com/google/go-github/v53/github/repos_environments.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "net/http" ) // Environment represents a single environment in a repository. 
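The new deployment branch-policy methods above map one-to-one onto the REST endpoints they document. A minimal sketch of creating and then listing policies for an environment, assuming an authenticated client and placeholder owner, repo and environment names:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authentication is assumed to be configured elsewhere

	// Restrict the "production" environment to branches matching "release/*".
	policy, _, err := client.Repositories.CreateDeploymentBranchPolicy(ctx, "example-org", "example-repo", "production",
		&github.DeploymentBranchPolicyRequest{Name: github.String("release/*")})
	if err != nil {
		fmt.Println("create policy:", err)
		return
	}
	fmt.Println("created policy", policy.GetID())

	// List every branch policy configured for the environment.
	policies, _, err := client.Repositories.ListDeploymentBranchPolicies(ctx, "example-org", "example-repo", "production")
	if err != nil {
		fmt.Println("list policies:", err)
		return
	}
	for _, p := range policies.BranchPolicies {
		fmt.Println(p.GetName())
	}
}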
@@ -27,6 +28,7 @@ type Environment struct { HTMLURL *string `json:"html_url,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` UpdatedAt *Timestamp `json:"updated_at,omitempty"` + CanAdminsBypass *bool `json:"can_admins_bypass,omitempty"` ProtectionRules []*ProtectionRule `json:"protection_rules,omitempty"` } @@ -146,11 +148,15 @@ func (s *RepositoriesService) GetEnvironment(ctx context.Context, owner, repo, n // MarshalJSON implements the json.Marshaler interface. // As the only way to clear a WaitTimer is to set it to 0, a missing WaitTimer object should default to 0, not null. +// As the default value for CanAdminsBypass is true, a nil value here marshals to true. func (c *CreateUpdateEnvironment) MarshalJSON() ([]byte, error) { type Alias CreateUpdateEnvironment if c.WaitTimer == nil { c.WaitTimer = Int(0) } + if c.CanAdminsBypass == nil { + c.CanAdminsBypass = Bool(true) + } return json.Marshal(&struct { *Alias }{ @@ -165,9 +171,17 @@ func (c *CreateUpdateEnvironment) MarshalJSON() ([]byte, error) { type CreateUpdateEnvironment struct { WaitTimer *int `json:"wait_timer"` Reviewers []*EnvReviewers `json:"reviewers"` + CanAdminsBypass *bool `json:"can_admins_bypass"` DeploymentBranchPolicy *BranchPolicy `json:"deployment_branch_policy"` } +// createUpdateEnvironmentNoEnterprise represents the fields accepted for Pro/Teams private repos. +// Ref: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment +// See https://github.com/google/go-github/issues/2602 for more information. +type createUpdateEnvironmentNoEnterprise struct { + DeploymentBranchPolicy *BranchPolicy `json:"deployment_branch_policy"` +} + // CreateUpdateEnvironment create or update a new environment for a repository. // // GitHub API docs: https://docs.github.com/en/rest/deployments/environments#create-or-update-an-environment @@ -179,6 +193,33 @@ func (s *RepositoriesService) CreateUpdateEnvironment(ctx context.Context, owner return nil, nil, err } + e := new(Environment) + resp, err := s.client.Do(ctx, req, e) + if err != nil { + // The API returns 422 when the pricing plan doesn't support all the fields sent. + // This path will be executed for Pro/Teams private repos. + // For public repos, regardless of the pricing plan, all fields supported. + // For Free plan private repos the returned error code is 404. + // We are checking that the user didn't try to send a value for unsupported fields, + // and return an error if they did. + if resp != nil && resp.StatusCode == http.StatusUnprocessableEntity && environment != nil && len(environment.Reviewers) == 0 && environment.GetWaitTimer() == 0 { + return s.createNewEnvNoEnterprise(ctx, u, environment) + } + return nil, resp, err + } + return e, resp, nil +} + +// createNewEnvNoEnterprise is an internal function for cases where the original call returned 422. +// Currently only the `deployment_branch_policy` parameter is supported for Pro/Team private repos. 
+func (s *RepositoriesService) createNewEnvNoEnterprise(ctx context.Context, u string, environment *CreateUpdateEnvironment) (*Environment, *Response, error) { + req, err := s.client.NewRequest("PUT", u, &createUpdateEnvironmentNoEnterprise{ + DeploymentBranchPolicy: environment.DeploymentBranchPolicy, + }) + if err != nil { + return nil, nil, err + } + e := new(Environment) resp, err := s.client.Do(ctx, req, e) if err != nil { diff --git a/vendor/github.com/google/go-github/v45/github/repos_forks.go b/vendor/github.com/google/go-github/v53/github/repos_forks.go similarity index 92% rename from vendor/github.com/google/go-github/v45/github/repos_forks.go rename to vendor/github.com/google/go-github/v53/github/repos_forks.go index 97bb328ffb..f175dfe3b0 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_forks.go +++ b/vendor/github.com/google/go-github/v53/github/repos_forks.go @@ -7,9 +7,8 @@ package github import ( "context" - "fmt" - "encoding/json" + "fmt" ) // RepositoryListForksOptions specifies the optional parameters to the @@ -53,7 +52,9 @@ func (s *RepositoriesService) ListForks(ctx context.Context, owner, repo string, // RepositoriesService.CreateFork method. type RepositoryCreateForkOptions struct { // The organization to fork the repository into. - Organization string `url:"organization,omitempty"` + Organization string `json:"organization,omitempty"` + Name string `json:"name,omitempty"` + DefaultBranchOnly bool `json:"default_branch_only,omitempty"` } // CreateFork creates a fork of the specified repository. @@ -68,12 +69,8 @@ type RepositoryCreateForkOptions struct { // GitHub API docs: https://docs.github.com/en/rest/repos/forks#create-a-fork func (s *RepositoriesService) CreateFork(ctx context.Context, owner, repo string, opts *RepositoryCreateForkOptions) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - req, err := s.client.NewRequest("POST", u, nil) + req, err := s.client.NewRequest("POST", u, opts) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/google/go-github/v45/github/repos_hooks.go b/vendor/github.com/google/go-github/v53/github/repos_hooks.go similarity index 77% rename from vendor/github.com/google/go-github/v45/github/repos_hooks.go rename to vendor/github.com/google/go-github/v53/github/repos_hooks.go index 4e738cfe8c..ba229e7bca 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_hooks.go +++ b/vendor/github.com/google/go-github/v53/github/repos_hooks.go @@ -8,7 +8,9 @@ package github import ( "context" "fmt" - "time" + "net/http" + "net/url" + "strings" ) // WebHookPayload represents the data that is received from GitHub when a push @@ -37,8 +39,8 @@ type WebHookAuthor = CommitAuthor // Hook represents a GitHub (web and service) hook for a repository. type Hook struct { - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` URL *string `json:"url,omitempty"` ID *int64 `json:"id,omitempty"` Type *string `json:"type,omitempty"` @@ -197,3 +199,55 @@ func (s *RepositoriesService) TestHook(ctx context.Context, owner, repo string, } return s.client.Do(ctx, req, nil) } + +// Subscribe lets servers register to receive updates when a topic is updated. 
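Note: two behavioral points in the environments change above are easy to miss: a nil CanAdminsBypass now marshals as true (admin bypass must be disabled explicitly), and a 422 from a Pro/Teams private repo triggers a silent retry that resends only deployment_branch_policy, and only when no reviewers or wait timer were requested. A sketch, assuming the method takes the environment name as its fourth argument (the signature is truncated in this hunk):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authenticated transport needed in practice

	env, _, err := client.Repositories.CreateUpdateEnvironment(ctx, "my-org", "my-repo", "production",
		&github.CreateUpdateEnvironment{
			WaitTimer: github.Int(30),
			// Leaving this nil would marshal as true (see MarshalJSON above);
			// set it explicitly to actually block admin bypass.
			CanAdminsBypass: github.Bool(false),
			DeploymentBranchPolicy: &github.BranchPolicy{
				CustomBranchPolicies: github.Bool(true),
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env.GetHTMLURL())
}
```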
+// +// GitHub API docs: https://docs.github.com/en/rest/webhooks#pubsubhubbub +func (s *RepositoriesService) Subscribe(ctx context.Context, owner, repo, event, callback string, secret []byte) (*Response, error) { + req, err := s.createWebSubRequest("subscribe", owner, repo, event, callback, secret) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// Unsubscribe lets servers unregister to no longer receive updates when a topic is updated. +// +// GitHub API docs: https://docs.github.com/en/rest/webhooks#pubsubhubbub +func (s *RepositoriesService) Unsubscribe(ctx context.Context, owner, repo, event, callback string, secret []byte) (*Response, error) { + req, err := s.createWebSubRequest("unsubscribe", owner, repo, event, callback, secret) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// createWebSubRequest returns a subscribe/unsubscribe request that implements +// the WebSub (formerly PubSubHubbub) protocol. +// +// See: https://www.w3.org/TR/websub/#subscriber-sends-subscription-request +func (s *RepositoriesService) createWebSubRequest(hubMode, owner, repo, event, callback string, secret []byte) (*http.Request, error) { + topic := fmt.Sprintf( + "https://github.com/%s/%s/events/%s", + owner, + repo, + event, + ) + form := url.Values{} + form.Add("hub.mode", hubMode) + form.Add("hub.topic", topic) + form.Add("hub.callback", callback) + if secret != nil { + form.Add("hub.secret", string(secret)) + } + body := strings.NewReader(form.Encode()) + + req, err := s.client.NewFormRequest("hub", body) + if err != nil { + return nil, err + } + + return req, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_hooks_deliveries.go b/vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_hooks_deliveries.go rename to vendor/github.com/google/go-github/v53/github/repos_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_invitations.go b/vendor/github.com/google/go-github/v53/github/repos_invitations.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_invitations.go rename to vendor/github.com/google/go-github/v53/github/repos_invitations.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_keys.go b/vendor/github.com/google/go-github/v53/github/repos_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_keys.go rename to vendor/github.com/google/go-github/v53/github/repos_keys.go diff --git a/vendor/github.com/google/go-github/v53/github/repos_lfs.go b/vendor/github.com/google/go-github/v53/github/repos_lfs.go new file mode 100644 index 0000000000..6ac2e5a877 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/repos_lfs.go @@ -0,0 +1,49 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// EnableLFS turns the LFS (Large File Storage) feature ON for the selected repo. 
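Note: the new Subscribe/Unsubscribe helpers wrap GitHub's legacy PubSubHubbub endpoint; createWebSubRequest above shows the form-encoded hub.* parameters they send. A usage sketch (the callback URL must be reachable by GitHub, and the secret is optional):

```go
package main

import (
	"context"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // needs an authenticated transport in practice

	// Ask GitHub to POST push events for my-org/my-repo to the callback URL.
	// A non-nil secret is sent as hub.secret and used to sign deliveries.
	_, err := client.Repositories.Subscribe(ctx, "my-org", "my-repo", "push",
		"https://example.com/github/webhook", []byte("s3cr3t"))
	if err != nil {
		log.Fatal(err)
	}

	// Later, stop receiving events for the same topic/callback pair.
	if _, err := client.Repositories.Unsubscribe(ctx, "my-org", "my-repo", "push",
		"https://example.com/github/webhook", nil); err != nil {
		log.Fatal(err)
	}
}
```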
+// +// GitHub API docs: https://docs.github.com/en/rest/repos/lfs#enable-git-lfs-for-a-repository +func (s *RepositoriesService) EnableLFS(ctx context.Context, owner, repo string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/lfs", owner, repo) + + req, err := s.client.NewRequest("PUT", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// DisableLFS turns the LFS (Large File Storage) feature OFF for the selected repo. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/lfs#disable-git-lfs-for-a-repository +func (s *RepositoriesService) DisableLFS(ctx context.Context, owner, repo string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/lfs", owner, repo) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_merging.go b/vendor/github.com/google/go-github/v53/github/repos_merging.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_merging.go rename to vendor/github.com/google/go-github/v53/github/repos_merging.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_pages.go b/vendor/github.com/google/go-github/v53/github/repos_pages.go similarity index 67% rename from vendor/github.com/google/go-github/v45/github/repos_pages.go rename to vendor/github.com/google/go-github/v53/github/repos_pages.go index 9b864eb090..83075dbdd2 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_pages.go +++ b/vendor/github.com/google/go-github/v53/github/repos_pages.go @@ -17,6 +17,7 @@ type Pages struct { CNAME *string `json:"cname,omitempty"` Custom404 *bool `json:"custom_404,omitempty"` HTMLURL *string `json:"html_url,omitempty"` + BuildType *string `json:"build_type,omitempty"` Source *PagesSource `json:"source,omitempty"` Public *bool `json:"public,omitempty"` HTTPSCertificate *PagesHTTPSCertificate `json:"https_certificate,omitempty"` @@ -46,6 +47,44 @@ type PagesBuild struct { UpdatedAt *Timestamp `json:"updated_at,omitempty"` } +// PagesDomain represents a domain associated with a GitHub Pages site. 
+type PagesDomain struct { + Host *string `json:"host,omitempty"` + URI *string `json:"uri,omitempty"` + Nameservers *string `json:"nameservers,omitempty"` + DNSResolves *bool `json:"dns_resolves,omitempty"` + IsProxied *bool `json:"is_proxied,omitempty"` + IsCloudflareIP *bool `json:"is_cloudflare_ip,omitempty"` + IsFastlyIP *bool `json:"is_fastly_ip,omitempty"` + IsOldIPAddress *bool `json:"is_old_ip_address,omitempty"` + IsARecord *bool `json:"is_a_record,omitempty"` + HasCNAMERecord *bool `json:"has_cname_record,omitempty"` + HasMXRecordsPresent *bool `json:"has_mx_records_present,omitempty"` + IsValidDomain *bool `json:"is_valid_domain,omitempty"` + IsApexDomain *bool `json:"is_apex_domain,omitempty"` + ShouldBeARecord *bool `json:"should_be_a_record,omitempty"` + IsCNAMEToGithubUserDomain *bool `json:"is_cname_to_github_user_domain,omitempty"` + IsCNAMEToPagesDotGithubDotCom *bool `json:"is_cname_to_pages_dot_github_dot_com,omitempty"` + IsCNAMEToFastly *bool `json:"is_cname_to_fastly,omitempty"` + IsPointedToGithubPagesIP *bool `json:"is_pointed_to_github_pages_ip,omitempty"` + IsNonGithubPagesIPPresent *bool `json:"is_non_github_pages_ip_present,omitempty"` + IsPagesDomain *bool `json:"is_pages_domain,omitempty"` + IsServedByPages *bool `json:"is_served_by_pages,omitempty"` + IsValid *bool `json:"is_valid,omitempty"` + Reason *string `json:"reason,omitempty"` + RespondsToHTTPS *bool `json:"responds_to_https,omitempty"` + EnforcesHTTPS *bool `json:"enforces_https,omitempty"` + HTTPSError *string `json:"https_error,omitempty"` + IsHTTPSEligible *bool `json:"is_https_eligible,omitempty"` + CAAError *string `json:"caa_error,omitempty"` +} + +// PagesHealthCheckResponse represents the response given for the health check of a GitHub Pages site. +type PagesHealthCheckResponse struct { + Domain *PagesDomain `json:"domain,omitempty"` + AltDomain *PagesDomain `json:"alt_domain,omitempty"` +} + // PagesHTTPSCertificate represents the HTTPS Certificate information for a GitHub Pages site. type PagesHTTPSCertificate struct { State *string `json:"state,omitempty"` @@ -58,7 +97,8 @@ type PagesHTTPSCertificate struct { // createPagesRequest is a subset of Pages and is used internally // by EnablePages to pass only the known fields for the endpoint. type createPagesRequest struct { - Source *PagesSource `json:"source,omitempty"` + BuildType *string `json:"build_type,omitempty"` + Source *PagesSource `json:"source,omitempty"` } // EnablePages enables GitHub Pages for the named repo. @@ -68,7 +108,8 @@ func (s *RepositoriesService) EnablePages(ctx context.Context, owner, repo strin u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) pagesReq := &createPagesRequest{ - Source: pages.Source, + BuildType: pages.BuildType, + Source: pages.Source, } req, err := s.client.NewRequest("POST", u, pagesReq) @@ -92,9 +133,15 @@ type PagesUpdate struct { // CNAME represents a custom domain for the repository. // Leaving CNAME empty will remove the custom domain. CNAME *string `json:"cname"` + // BuildType is optional and can either be "legacy" or "workflow". + // "workflow" - You are using a github workflow to build your pages. + // "legacy" - You are deploying from a branch. + BuildType *string `json:"build_type,omitempty"` // Source must include the branch name, and may optionally specify the subdirectory "/docs". - // Possible values are: "gh-pages", "master", and "master /docs". 
- Source *string `json:"source,omitempty"` + // Possible values for Source.Branch are usually "gh-pages", "main", and "master", + // or any other existing branch name. + // Possible values for Source.Path are: "/", and "/docs". + Source *PagesSource `json:"source,omitempty"` // Public configures access controls for the site. // If "true", the site will be accessible to anyone on the internet. If "false", // the site will be accessible to anyone with read access to the repository that @@ -238,3 +285,22 @@ func (s *RepositoriesService) RequestPageBuild(ctx context.Context, owner, repo return build, resp, nil } + +// GetPagesHealthCheck gets a DNS health check for the CNAME record configured for a repository's GitHub Pages. +// +// GitHub API docs: https://docs.github.com/en/rest/pages#get-a-dns-health-check-for-github-pages +func (s *RepositoriesService) GetPageHealthCheck(ctx context.Context, owner, repo string) (*PagesHealthCheckResponse, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/pages/health", owner, repo) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + healthCheckResponse := new(PagesHealthCheckResponse) + resp, err := s.client.Do(ctx, req, healthCheckResponse) + if err != nil { + return nil, resp, err + } + + return healthCheckResponse, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_prereceive_hooks.go b/vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_prereceive_hooks.go rename to vendor/github.com/google/go-github/v53/github/repos_prereceive_hooks.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_projects.go b/vendor/github.com/google/go-github/v53/github/repos_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_projects.go rename to vendor/github.com/google/go-github/v53/github/repos_projects.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_releases.go b/vendor/github.com/google/go-github/v53/github/repos_releases.go similarity index 95% rename from vendor/github.com/google/go-github/v45/github/repos_releases.go rename to vendor/github.com/google/go-github/v53/github/repos_releases.go index f1ab65c185..464c2ee1e2 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_releases.go +++ b/vendor/github.com/google/go-github/v53/github/repos_releases.go @@ -19,14 +19,18 @@ import ( // RepositoryRelease represents a GitHub release in a repository. type RepositoryRelease struct { - TagName *string `json:"tag_name,omitempty"` - TargetCommitish *string `json:"target_commitish,omitempty"` - Name *string `json:"name,omitempty"` - Body *string `json:"body,omitempty"` - Draft *bool `json:"draft,omitempty"` - Prerelease *bool `json:"prerelease,omitempty"` + TagName *string `json:"tag_name,omitempty"` + TargetCommitish *string `json:"target_commitish,omitempty"` + Name *string `json:"name,omitempty"` + Body *string `json:"body,omitempty"` + Draft *bool `json:"draft,omitempty"` + Prerelease *bool `json:"prerelease,omitempty"` + // MakeLatest can be one of: "true", "false", or "legacy". 
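Note: PagesUpdate.Source changing from *string to *PagesSource is a breaking change for UpdatePages callers; the sketch below shows the new shape together with the build_type option and the new DNS health-check call. It assumes UpdatePages keeps its (ctx, owner, repo, *PagesUpdate) signature and that PagesSource has Branch/Path fields, neither of which is shown in this hunk:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authenticated transport required in practice

	// Source is now a structured PagesSource instead of a "branch /path" string.
	_, err := client.Repositories.UpdatePages(ctx, "my-org", "my-repo", &github.PagesUpdate{
		CNAME:     github.String("docs.example.com"),
		BuildType: github.String("workflow"), // or "legacy" to deploy from a branch
		Source: &github.PagesSource{
			Branch: github.String("main"),
			Path:   github.String("/docs"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// New in this version: DNS health check for the configured custom domain.
	health, _, err := client.Repositories.GetPageHealthCheck(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pages domain health: %+v\n", health.Domain)
}
```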
+ MakeLatest *string `json:"make_latest,omitempty"` DiscussionCategoryName *string `json:"discussion_category_name,omitempty"` - GenerateReleaseNotes *bool `json:"generate_release_notes,omitempty"` + + // The following fields are not used in EditRelease: + GenerateReleaseNotes *bool `json:"generate_release_notes,omitempty"` // The following fields are not used in CreateRelease or EditRelease: ID *int64 `json:"id,omitempty"` @@ -174,6 +178,7 @@ type repositoryReleaseRequest struct { Body *string `json:"body,omitempty"` Draft *bool `json:"draft,omitempty"` Prerelease *bool `json:"prerelease,omitempty"` + MakeLatest *string `json:"make_latest,omitempty"` GenerateReleaseNotes *bool `json:"generate_release_notes,omitempty"` DiscussionCategoryName *string `json:"discussion_category_name,omitempty"` } @@ -194,6 +199,7 @@ func (s *RepositoriesService) CreateRelease(ctx context.Context, owner, repo str Body: release.Body, Draft: release.Draft, Prerelease: release.Prerelease, + MakeLatest: release.MakeLatest, DiscussionCategoryName: release.DiscussionCategoryName, GenerateReleaseNotes: release.GenerateReleaseNotes, } @@ -227,8 +233,8 @@ func (s *RepositoriesService) EditRelease(ctx context.Context, owner, repo strin Body: release.Body, Draft: release.Draft, Prerelease: release.Prerelease, + MakeLatest: release.MakeLatest, DiscussionCategoryName: release.DiscussionCategoryName, - GenerateReleaseNotes: release.GenerateReleaseNotes, } req, err := s.client.NewRequest("PATCH", u, releaseReq) diff --git a/vendor/github.com/google/go-github/v45/github/repos_stats.go b/vendor/github.com/google/go-github/v53/github/repos_stats.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_stats.go rename to vendor/github.com/google/go-github/v53/github/repos_stats.go diff --git a/vendor/github.com/google/go-github/v45/github/repos_statuses.go b/vendor/github.com/google/go-github/v53/github/repos_statuses.go similarity index 97% rename from vendor/github.com/google/go-github/v45/github/repos_statuses.go rename to vendor/github.com/google/go-github/v53/github/repos_statuses.go index 42238f3c9d..ea3d166c75 100644 --- a/vendor/github.com/google/go-github/v45/github/repos_statuses.go +++ b/vendor/github.com/google/go-github/v53/github/repos_statuses.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // RepoStatus represents the status of a repository at a particular reference. @@ -35,8 +34,8 @@ type RepoStatus struct { AvatarURL *string `json:"avatar_url,omitempty"` Creator *User `json:"creator,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - UpdatedAt *time.Time `json:"updated_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` } func (r RepoStatus) String() string { diff --git a/vendor/github.com/google/go-github/v53/github/repos_tags.go b/vendor/github.com/google/go-github/v53/github/repos_tags.go new file mode 100644 index 0000000000..ff46d90c73 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/repos_tags.go @@ -0,0 +1,76 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// TagProtection represents a repository tag protection. 
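Note: MakeLatest is a string rather than a bool because the API accepts "true", "false", or "legacy"; note also that GenerateReleaseNotes is forwarded on create but intentionally dropped from the edit request above. A sketch:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authenticated transport required in practice

	rel, _, err := client.Repositories.CreateRelease(ctx, "my-org", "my-repo", &github.RepositoryRelease{
		TagName:              github.String("v1.2.3"),
		Name:                 github.String("v1.2.3"),
		GenerateReleaseNotes: github.Bool(true),
		// "true", "false", or "legacy" (legacy lets GitHub pick the latest
		// release by creation date and semantic version).
		MakeLatest: github.String("true"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("release id:", rel.GetID())
}
```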
+type TagProtection struct { + ID *int64 `json:"id"` + Pattern *string `json:"pattern"` +} + +// tagProtectionRequest represents a request to create tag protection. +type tagProtectionRequest struct { + // An optional glob pattern to match against when enforcing tag protection. + Pattern string `json:"pattern"` +} + +// ListTagProtection lists tag protection of the specified repository. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/tags#list-tag-protection-states-for-a-repository +func (s *RepositoriesService) ListTagProtection(ctx context.Context, owner, repo string) ([]*TagProtection, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/tags/protection", owner, repo) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var tagProtections []*TagProtection + resp, err := s.client.Do(ctx, req, &tagProtections) + if err != nil { + return nil, resp, err + } + + return tagProtections, resp, nil +} + +// CreateTagProtection creates the tag protection of the specified repository. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/tags#create-a-tag-protection-state-for-a-repository +func (s *RepositoriesService) CreateTagProtection(ctx context.Context, owner, repo, pattern string) (*TagProtection, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/tags/protection", owner, repo) + r := &tagProtectionRequest{Pattern: pattern} + req, err := s.client.NewRequest("POST", u, r) + if err != nil { + return nil, nil, err + } + + tagProtection := new(TagProtection) + resp, err := s.client.Do(ctx, req, tagProtection) + if err != nil { + return nil, resp, err + } + + return tagProtection, resp, nil +} + +// DeleteTagProtection deletes a tag protection from the specified repository. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/tags#delete-a-tag-protection-state-for-a-repository +func (s *RepositoriesService) DeleteTagProtection(ctx context.Context, owner, repo string, tagProtectionID int64) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/tags/protection/%v", owner, repo, tagProtectionID) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_traffic.go b/vendor/github.com/google/go-github/v53/github/repos_traffic.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/repos_traffic.go rename to vendor/github.com/google/go-github/v53/github/repos_traffic.go diff --git a/vendor/github.com/google/go-github/v45/github/scim.go b/vendor/github.com/google/go-github/v53/github/scim.go similarity index 78% rename from vendor/github.com/google/go-github/v45/github/scim.go rename to vendor/github.com/google/go-github/v53/github/scim.go index c4abb9ab3e..7deee6be4b 100644 --- a/vendor/github.com/google/go-github/v45/github/scim.go +++ b/vendor/github.com/google/go-github/v53/github/scim.go @@ -29,6 +29,9 @@ type SCIMUserAttributes struct { ExternalID *string `json:"externalId,omitempty"` // (Optional.) Groups []string `json:"groups,omitempty"` // (Optional.) Active *bool `json:"active,omitempty"` // (Optional.) + // Only populated as a result of calling ListSCIMProvisionedIdentitiesOptions or GetSCIMProvisioningInfoForUser: + ID *string `json:"id,omitempty"` + Meta *SCIMMeta `json:"meta,omitempty"` } // SCIMUserName represents SCIM user information. @@ -38,41 +41,66 @@ type SCIMUserName struct { Formatted *string `json:"formatted,omitempty"` // (Optional.) 
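Note: repos_tags.go is a new file wrapping the tag-protection endpoints. Usage sketch:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authenticated transport required in practice

	// Protect tags matching v*, then list and clean up.
	tp, _, err := client.Repositories.CreateTagProtection(ctx, "my-org", "my-repo", "v*")
	if err != nil {
		log.Fatal(err)
	}

	protections, _, err := client.Repositories.ListTagProtection(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range protections {
		fmt.Println(p.GetID(), p.GetPattern())
	}

	if _, err := client.Repositories.DeleteTagProtection(ctx, "my-org", "my-repo", tp.GetID()); err != nil {
		log.Fatal(err)
	}
}
```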
} -//SCIMUserEmail represents SCIM user email. +// SCIMUserEmail represents SCIM user email. type SCIMUserEmail struct { Value string `json:"value"` // (Required.) Primary *bool `json:"primary,omitempty"` // (Optional.) Type *string `json:"type,omitempty"` // (Optional.) } +// SCIMMeta represents metadata about the SCIM resource. +type SCIMMeta struct { + ResourceType *string `json:"resourceType,omitempty"` + Created *Timestamp `json:"created,omitempty"` + LastModified *Timestamp `json:"lastModified,omitempty"` + Location *string `json:"location,omitempty"` +} + +// SCIMProvisionedIdentities represents the result of calling ListSCIMProvisionedIdentities. +type SCIMProvisionedIdentities struct { + Schemas []string `json:"schemas,omitempty"` + TotalResults *int `json:"totalResults,omitempty"` + ItemsPerPage *int `json:"itemsPerPage,omitempty"` + StartIndex *int `json:"startIndex,omitempty"` + Resources []*SCIMUserAttributes `json:"Resources,omitempty"` +} + // ListSCIMProvisionedIdentitiesOptions represents options for ListSCIMProvisionedIdentities. // // Github API docs: https://docs.github.com/en/rest/scim#list-scim-provisioned-identities--parameters type ListSCIMProvisionedIdentitiesOptions struct { - StartIndex *int `json:"startIndex,omitempty"` // Used for pagination: the index of the first result to return. (Optional.) - Count *int `json:"count,omitempty"` // Used for pagination: the number of results to return. (Optional.) + StartIndex *int `url:"startIndex,omitempty"` // Used for pagination: the index of the first result to return. (Optional.) + Count *int `url:"count,omitempty"` // Used for pagination: the number of results to return. (Optional.) // Filter results using the equals query parameter operator (eq). // You can filter results that are equal to id, userName, emails, and external_id. // For example, to search for an identity with the userName Octocat, you would use this query: ?filter=userName%20eq%20\"Octocat\". // To filter results for the identity with the email octocat@github.com, you would use this query: ?filter=emails%20eq%20\"octocat@github.com\". // (Optional.) - Filter *string `json:"filter,omitempty"` + Filter *string `url:"filter,omitempty"` } // ListSCIMProvisionedIdentities lists SCIM provisioned identities. // // GitHub API docs: https://docs.github.com/en/rest/scim#list-scim-provisioned-identities -func (s *SCIMService) ListSCIMProvisionedIdentities(ctx context.Context, org string, opts *ListSCIMProvisionedIdentitiesOptions) (*Response, error) { +func (s *SCIMService) ListSCIMProvisionedIdentities(ctx context.Context, org string, opts *ListSCIMProvisionedIdentitiesOptions) (*SCIMProvisionedIdentities, *Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users", org) u, err := addOptions(u, opts) if err != nil { - return nil, err + return nil, nil, err } + req, err := s.client.NewRequest("GET", u, nil) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(ctx, req, nil) + + identities := new(SCIMProvisionedIdentities) + resp, err := s.client.Do(ctx, req, identities) + if err != nil { + return nil, resp, err + } + + return identities, resp, nil } // ProvisionAndInviteSCIMUser provisions organization membership for a user, and sends an activation email to the email address. 
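Note: ListSCIMProvisionedIdentities previously discarded the response body; it now unmarshals into SCIMProvisionedIdentities, and the option fields switch from json to url tags so they are actually encoded as query parameters. A sketch, assuming the service is exposed as client.SCIM and that SCIMUserAttributes carries a UserName field (neither is shown in this hunk):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authenticated transport required in practice

	opts := &github.ListSCIMProvisionedIdentitiesOptions{
		StartIndex: github.Int(1),
		Count:      github.Int(10),
		Filter:     github.String(`userName eq "Octocat"`),
	}
	identities, _, err := client.SCIM.ListSCIMProvisionedIdentities(ctx, "my-org", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range identities.Resources {
		fmt.Println(r.UserName)
	}
}
```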
@@ -84,23 +112,32 @@ func (s *SCIMService) ProvisionAndInviteSCIMUser(ctx context.Context, org string if err != nil { return nil, err } + req, err := s.client.NewRequest("POST", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } // GetSCIMProvisioningInfoForUser returns SCIM provisioning information for a user. // // GitHub API docs: https://docs.github.com/en/rest/scim#supported-scim-user-attributes -func (s *SCIMService) GetSCIMProvisioningInfoForUser(ctx context.Context, org, scimUserID string) (*Response, error) { +func (s *SCIMService) GetSCIMProvisioningInfoForUser(ctx context.Context, org, scimUserID string) (*SCIMUserAttributes, *Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) req, err := s.client.NewRequest("GET", u, nil) if err != nil { - return nil, err + return nil, nil, err } - return s.client.Do(ctx, req, nil) + + user := new(SCIMUserAttributes) + resp, err := s.client.Do(ctx, req, &user) + if err != nil { + return nil, resp, err + } + + return user, resp, nil } // UpdateProvisionedOrgMembership updates a provisioned organization membership. @@ -112,10 +149,12 @@ func (s *SCIMService) UpdateProvisionedOrgMembership(ctx context.Context, org, s if err != nil { return nil, err } + req, err := s.client.NewRequest("PUT", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } @@ -143,10 +182,12 @@ func (s *SCIMService) UpdateAttributeForSCIMUser(ctx context.Context, org, scimU if err != nil { return nil, err } + req, err := s.client.NewRequest("PATCH", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } @@ -159,5 +200,6 @@ func (s *SCIMService) DeleteSCIMUserFromOrg(ctx context.Context, org, scimUserID if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } diff --git a/vendor/github.com/google/go-github/v45/github/search.go b/vendor/github.com/google/go-github/v53/github/search.go similarity index 93% rename from vendor/github.com/google/go-github/v45/github/search.go rename to vendor/github.com/google/go-github/v53/github/search.go index 344f1bb985..adb832d0d8 100644 --- a/vendor/github.com/google/go-github/v45/github/search.go +++ b/vendor/github.com/google/go-github/v53/github/search.go @@ -9,6 +9,7 @@ import ( "context" "fmt" "strconv" + "strings" qs "github.com/google/go-querystring/query" ) @@ -19,8 +20,10 @@ import ( // Each method takes a query string defining the search keywords and any search qualifiers. // For example, when searching issues, the query "gopher is:issue language:go" will search // for issues containing the word "gopher" in Go repositories. The method call -// opts := &github.SearchOptions{Sort: "created", Order: "asc"} -// cl.Search.Issues(ctx, "gopher is:issue language:go", opts) +// +// opts := &github.SearchOptions{Sort: "created", Order: "asc"} +// cl.Search.Issues(ctx, "gopher is:issue language:go", opts) +// // will search for such issues, sorting by creation date in ascending order // (i.e., oldest first). // @@ -299,29 +302,32 @@ func (s *SearchService) search(ctx context.Context, searchType string, parameter if err != nil { return nil, err } - + var acceptHeaders []string switch { case searchType == "commits": // Accept header for search commits preview endpoint // TODO: remove custom Accept header when this API fully launches. 
- req.Header.Set("Accept", mediaTypeCommitSearchPreview) + acceptHeaders = append(acceptHeaders, mediaTypeCommitSearchPreview) case searchType == "topics": // Accept header for search repositories based on topics preview endpoint // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) + acceptHeaders = append(acceptHeaders, mediaTypeTopicsPreview) case searchType == "repositories": // Accept header for search repositories based on topics preview endpoint // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeTopicsPreview) + acceptHeaders = append(acceptHeaders, mediaTypeTopicsPreview) case searchType == "issues": // Accept header for search issues based on reactions preview endpoint // TODO: remove custom Accept header when this API fully launches. - req.Header.Set("Accept", mediaTypeReactionsPreview) - case opts != nil && opts.TextMatch: - // Accept header defaults to "application/vnd.github.v3+json" - // We change it here to fetch back text-match metadata - req.Header.Set("Accept", "application/vnd.github.v3.text-match+json") + acceptHeaders = append(acceptHeaders, mediaTypeReactionsPreview) + } + // https://docs.github.com/en/rest/search#search-repositories + // Accept header defaults to "application/vnd.github.v3+json" + // We change it here to fetch back text-match metadata + if opts != nil && opts.TextMatch { + acceptHeaders = append(acceptHeaders, "application/vnd.github.v3.text-match+json") } + req.Header.Set("Accept", strings.Join(acceptHeaders, ", ")) return s.client.Do(ctx, req, result) } diff --git a/vendor/github.com/google/go-github/v45/github/secret_scanning.go b/vendor/github.com/google/go-github/v53/github/secret_scanning.go similarity index 94% rename from vendor/github.com/google/go-github/v45/github/secret_scanning.go rename to vendor/github.com/google/go-github/v53/github/secret_scanning.go index ec64950a67..d512560d9f 100644 --- a/vendor/github.com/google/go-github/v45/github/secret_scanning.go +++ b/vendor/github.com/google/go-github/v53/github/secret_scanning.go @@ -61,6 +61,14 @@ type SecretScanningAlertListOptions struct { Resolution string `url:"resolution,omitempty"` ListCursorOptions + + // List options can vary on the Enterprise type. + // On Enterprise Cloud, Secret Scan alerts support requesting by page number + // along with providing a cursor for an "after" param. + // See: https://docs.github.com/en/enterprise-cloud@latest/rest/secret-scanning#list-secret-scanning-alerts-for-an-organization + // Whereas on Enterprise Server, pagination is by index. + // See: https://docs.github.com/en/enterprise-server@3.6/rest/secret-scanning#list-secret-scanning-alerts-for-an-organization + ListOptions } // SecretScanningAlertUpdateOptions specifies optional parameters to the SecretScanningService.UpdateAlert method. 
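Note: the embedded ListOptions above sits alongside the existing ListCursorOptions because GitHub Enterprise Server paginates secret-scanning alerts by page index while Enterprise Cloud uses an after cursor; callers pick whichever matches their deployment. A sketch of the two option shapes (the list methods themselves, e.g. SecretScanning.ListAlertsForOrg, are unchanged; the After field on ListCursorOptions is an assumption based on the comment above, not shown in this hunk):

```go
package main

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	// Enterprise Cloud: cursor-based pagination.
	cloudOpts := &github.SecretScanningAlertListOptions{
		ListCursorOptions: github.ListCursorOptions{After: "cursor-from-previous-response"},
	}

	// Enterprise Server: index-based pagination via the newly embedded ListOptions.
	serverOpts := &github.SecretScanningAlertListOptions{
		ListOptions: github.ListOptions{Page: 2, PerPage: 50},
	}

	fmt.Printf("%+v\n%+v\n", cloudOpts, serverOpts)
}
```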
diff --git a/vendor/github.com/google/go-github/v45/github/strings.go b/vendor/github.com/google/go-github/v53/github/strings.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/strings.go rename to vendor/github.com/google/go-github/v53/github/strings.go diff --git a/vendor/github.com/google/go-github/v45/github/teams.go b/vendor/github.com/google/go-github/v53/github/teams.go similarity index 97% rename from vendor/github.com/google/go-github/v45/github/teams.go rename to vendor/github.com/google/go-github/v53/github/teams.go index 38845e0953..0ee7c20079 100644 --- a/vendor/github.com/google/go-github/v45/github/teams.go +++ b/vendor/github.com/google/go-github/v53/github/teams.go @@ -10,7 +10,6 @@ import ( "fmt" "net/http" "strings" - "time" ) // TeamsService provides access to the team-related functions @@ -68,7 +67,7 @@ type Invitation struct { Email *string `json:"email,omitempty"` // Role can be one of the values - 'direct_member', 'admin', 'billing_manager', 'hiring_manager', or 'reinstate'. Role *string `json:"role,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` Inviter *User `json:"inviter,omitempty"` TeamCount *int `json:"team_count,omitempty"` InvitationTeamURL *string `json:"invitation_team_url,omitempty"` @@ -914,7 +913,7 @@ type ListExternalGroupsOptions struct { ListOptions } -// ListExternalGroups lists external groups connected to a team on GitHub. +// ListExternalGroups lists external groups in an organization on GitHub. // // GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#list-external-groups-in-an-organization func (s *TeamsService) ListExternalGroups(ctx context.Context, org string, opts *ListExternalGroupsOptions) (*ExternalGroupList, *Response, error) { @@ -938,6 +937,26 @@ func (s *TeamsService) ListExternalGroups(ctx context.Context, org string, opts return externalGroups, resp, nil } +// ListExternalGroupsForTeamBySlug lists external groups connected to a team on GitHub. +// +// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#list-a-connection-between-an-external-group-and-a-team +func (s *TeamsService) ListExternalGroupsForTeamBySlug(ctx context.Context, org, slug string) (*ExternalGroupList, *Response, error) { + u := fmt.Sprintf("orgs/%v/teams/%v/external-groups", org, slug) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + externalGroups := new(ExternalGroupList) + resp, err := s.client.Do(ctx, req, externalGroups) + if err != nil { + return nil, resp, err + } + + return externalGroups, resp, nil +} + // UpdateConnectedExternalGroup updates the connection between an external group and a team. 
// // GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#update-the-connection-between-an-external-group-and-a-team diff --git a/vendor/github.com/google/go-github/v45/github/teams_discussion_comments.go b/vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/teams_discussion_comments.go rename to vendor/github.com/google/go-github/v53/github/teams_discussion_comments.go diff --git a/vendor/github.com/google/go-github/v45/github/teams_discussions.go b/vendor/github.com/google/go-github/v53/github/teams_discussions.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/teams_discussions.go rename to vendor/github.com/google/go-github/v53/github/teams_discussions.go diff --git a/vendor/github.com/google/go-github/v45/github/teams_members.go b/vendor/github.com/google/go-github/v53/github/teams_members.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/teams_members.go rename to vendor/github.com/google/go-github/v53/github/teams_members.go diff --git a/vendor/github.com/google/go-github/v45/github/timestamp.go b/vendor/github.com/google/go-github/v53/github/timestamp.go similarity index 90% rename from vendor/github.com/google/go-github/v45/github/timestamp.go rename to vendor/github.com/google/go-github/v53/github/timestamp.go index 1061a55e68..00c1235e9d 100644 --- a/vendor/github.com/google/go-github/v45/github/timestamp.go +++ b/vendor/github.com/google/go-github/v53/github/timestamp.go @@ -22,6 +22,14 @@ func (t Timestamp) String() string { return t.Time.String() } +// GetTime returns std time.Time. +func (t *Timestamp) GetTime() *time.Time { + if t == nil { + return nil + } + return &t.Time +} + // UnmarshalJSON implements the json.Unmarshaler interface. // Time is expected in RFC3339 or Unix format. 
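Note: several structs in this upgrade (Hook, RepoStatus, Invitation, GPGKey, and others) switch CreatedAt/UpdatedAt from *time.Time to *Timestamp, which breaks callers expecting a std time. The new GetTime helper above is the nil-safe bridge back; a sketch of the typical caller-side migration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-github/v53/github"
)

// hookAge shows the usual fix after CreatedAt changed type:
// *Timestamp -> GetTime() -> *time.Time, safe even when the field is nil.
func hookAge(h *github.Hook) time.Duration {
	created := h.CreatedAt.GetTime() // nil receiver returns nil instead of panicking
	if created == nil {
		return 0
	}
	return time.Since(*created)
}

func main() {
	h := &github.Hook{CreatedAt: &github.Timestamp{Time: time.Now().Add(-48 * time.Hour)}}
	fmt.Println(hookAge(h).Round(time.Hour))
}
```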
func (t *Timestamp) UnmarshalJSON(data []byte) (err error) { diff --git a/vendor/github.com/google/go-github/v45/github/users.go b/vendor/github.com/google/go-github/v53/github/users.go similarity index 98% rename from vendor/github.com/google/go-github/v45/github/users.go rename to vendor/github.com/google/go-github/v53/github/users.go index d40d23e90f..1b0670103b 100644 --- a/vendor/github.com/google/go-github/v45/github/users.go +++ b/vendor/github.com/google/go-github/v53/github/users.go @@ -41,8 +41,8 @@ type User struct { SuspendedAt *Timestamp `json:"suspended_at,omitempty"` Type *string `json:"type,omitempty"` SiteAdmin *bool `json:"site_admin,omitempty"` - TotalPrivateRepos *int `json:"total_private_repos,omitempty"` - OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"` + TotalPrivateRepos *int64 `json:"total_private_repos,omitempty"` + OwnedPrivateRepos *int64 `json:"owned_private_repos,omitempty"` PrivateGists *int `json:"private_gists,omitempty"` DiskUsage *int `json:"disk_usage,omitempty"` Collaborators *int `json:"collaborators,omitempty"` diff --git a/vendor/github.com/google/go-github/v45/github/users_administration.go b/vendor/github.com/google/go-github/v53/github/users_administration.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/users_administration.go rename to vendor/github.com/google/go-github/v53/github/users_administration.go diff --git a/vendor/github.com/google/go-github/v45/github/users_blocking.go b/vendor/github.com/google/go-github/v53/github/users_blocking.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/users_blocking.go rename to vendor/github.com/google/go-github/v53/github/users_blocking.go diff --git a/vendor/github.com/google/go-github/v45/github/users_emails.go b/vendor/github.com/google/go-github/v53/github/users_emails.go similarity index 73% rename from vendor/github.com/google/go-github/v45/github/users_emails.go rename to vendor/github.com/google/go-github/v53/github/users_emails.go index be7e0f819e..67bd210e8d 100644 --- a/vendor/github.com/google/go-github/v45/github/users_emails.go +++ b/vendor/github.com/google/go-github/v53/github/users_emails.go @@ -70,3 +70,28 @@ func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Resp return s.client.Do(ctx, req, nil) } + +// SetEmailVisibility sets the visibility for the primary email address of the authenticated user. +// `visibility` can be "private" or "public". 
+// +// GitHub API docs: https://docs.github.com/en/rest/users/emails#set-primary-email-visibility-for-the-authenticated-user +func (s *UsersService) SetEmailVisibility(ctx context.Context, visibility string) ([]*UserEmail, *Response, error) { + u := "user/email/visibility" + + updateVisiblilityReq := &UserEmail{ + Visibility: &visibility, + } + + req, err := s.client.NewRequest("PATCH", u, updateVisiblilityReq) + if err != nil { + return nil, nil, err + } + + var e []*UserEmail + resp, err := s.client.Do(ctx, req, &e) + if err != nil { + return nil, resp, err + } + + return e, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/users_followers.go b/vendor/github.com/google/go-github/v53/github/users_followers.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/users_followers.go rename to vendor/github.com/google/go-github/v53/github/users_followers.go diff --git a/vendor/github.com/google/go-github/v45/github/users_gpg_keys.go b/vendor/github.com/google/go-github/v53/github/users_gpg_keys.go similarity index 96% rename from vendor/github.com/google/go-github/v45/github/users_gpg_keys.go rename to vendor/github.com/google/go-github/v53/github/users_gpg_keys.go index e9ce62221c..54189b8307 100644 --- a/vendor/github.com/google/go-github/v45/github/users_gpg_keys.go +++ b/vendor/github.com/google/go-github/v53/github/users_gpg_keys.go @@ -8,7 +8,6 @@ package github import ( "context" "fmt" - "time" ) // GPGKey represents a GitHub user's public GPG key used to verify GPG signed commits and tags. @@ -26,8 +25,8 @@ type GPGKey struct { CanEncryptComms *bool `json:"can_encrypt_comms,omitempty"` CanEncryptStorage *bool `json:"can_encrypt_storage,omitempty"` CanCertify *bool `json:"can_certify,omitempty"` - CreatedAt *time.Time `json:"created_at,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + ExpiresAt *Timestamp `json:"expires_at,omitempty"` } // String stringifies a GPGKey. 
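Note: SetEmailVisibility applies only to the authenticated user's primary address and accepts "private" or "public". Sketch:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // must be authenticated as the user in practice

	emails, _, err := client.Users.SetEmailVisibility(ctx, "private")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range emails {
		fmt.Println(e.GetEmail(), e.GetVisibility())
	}
}
```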
diff --git a/vendor/github.com/google/go-github/v45/github/users_keys.go b/vendor/github.com/google/go-github/v53/github/users_keys.go similarity index 96% rename from vendor/github.com/google/go-github/v45/github/users_keys.go rename to vendor/github.com/google/go-github/v53/github/users_keys.go index 59d26cdefa..b49b8e4b4e 100644 --- a/vendor/github.com/google/go-github/v45/github/users_keys.go +++ b/vendor/github.com/google/go-github/v53/github/users_keys.go @@ -19,6 +19,8 @@ type Key struct { ReadOnly *bool `json:"read_only,omitempty"` Verified *bool `json:"verified,omitempty"` CreatedAt *Timestamp `json:"created_at,omitempty"` + AddedBy *string `json:"added_by,omitempty"` + LastUsed *Timestamp `json:"last_used,omitempty"` } func (k Key) String() string { diff --git a/vendor/github.com/google/go-github/v45/github/users_packages.go b/vendor/github.com/google/go-github/v53/github/users_packages.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/users_packages.go rename to vendor/github.com/google/go-github/v53/github/users_packages.go diff --git a/vendor/github.com/google/go-github/v45/github/users_projects.go b/vendor/github.com/google/go-github/v53/github/users_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/users_projects.go rename to vendor/github.com/google/go-github/v53/github/users_projects.go diff --git a/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go b/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go new file mode 100644 index 0000000000..567623f887 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/users_ssh_signing_keys.go @@ -0,0 +1,108 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// SSHSigningKey represents a public SSH key used to sign git commits. +type SSHSigningKey struct { + ID *int64 `json:"id,omitempty"` + Key *string `json:"key,omitempty"` + Title *string `json:"title,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` +} + +func (k SSHSigningKey) String() string { + return Stringify(k) +} + +// ListSSHSigningKeys lists the SSH signing keys for a user. Passing an empty +// username string will fetch SSH signing keys for the authenticated user. +// +// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#list-ssh-signing-keys-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#list-ssh-signing-keys-for-a-user +func (s *UsersService) ListSSHSigningKeys(ctx context.Context, user string, opts *ListOptions) ([]*SSHSigningKey, *Response, error) { + var u string + if user != "" { + u = fmt.Sprintf("users/%v/ssh_signing_keys", user) + } else { + u = "user/ssh_signing_keys" + } + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var keys []*SSHSigningKey + resp, err := s.client.Do(ctx, req, &keys) + if err != nil { + return nil, resp, err + } + + return keys, resp, nil +} + +// GetSSHSigningKey fetches a single SSH signing key for the authenticated user. 
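Note: users_ssh_signing_keys.go is also new; ListSSHSigningKeys follows the usual convention where an empty username targets the authenticated user. Sketch:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v53/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // authenticated transport required when listing own keys

	// Empty user lists the authenticated user's SSH signing keys.
	keys, _, err := client.Users.ListSSHSigningKeys(ctx, "", &github.ListOptions{PerPage: 50})
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k.GetID(), k.GetTitle())
	}
}
```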
+// +// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#get-an-ssh-signing-key-for-the-authenticated-user +func (s *UsersService) GetSSHSigningKey(ctx context.Context, id int64) (*SSHSigningKey, *Response, error) { + u := fmt.Sprintf("user/ssh_signing_keys/%v", id) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + key := new(SSHSigningKey) + resp, err := s.client.Do(ctx, req, key) + if err != nil { + return nil, resp, err + } + + return key, resp, nil +} + +// CreateSSHSigningKey adds a SSH signing key for the authenticated user. +// +// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#create-a-ssh-signing-key-for-the-authenticated-user +func (s *UsersService) CreateSSHSigningKey(ctx context.Context, key *Key) (*SSHSigningKey, *Response, error) { + u := "user/ssh_signing_keys" + + req, err := s.client.NewRequest("POST", u, key) + if err != nil { + return nil, nil, err + } + + k := new(SSHSigningKey) + resp, err := s.client.Do(ctx, req, k) + if err != nil { + return nil, resp, err + } + + return k, resp, nil +} + +// DeleteKey deletes a SSH signing key for the authenticated user. +// +// GitHub API docs: https://docs.github.com/en/rest/users/ssh-signing-keys#delete-an-ssh-signing-key-for-the-authenticated-user +func (s *UsersService) DeleteSSHSigningKey(ctx context.Context, id int64) (*Response, error) { + u := fmt.Sprintf("user/ssh_signing_keys/%v", id) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v45/github/with_appengine.go b/vendor/github.com/google/go-github/v53/github/with_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/with_appengine.go rename to vendor/github.com/google/go-github/v53/github/with_appengine.go diff --git a/vendor/github.com/google/go-github/v45/github/without_appengine.go b/vendor/github.com/google/go-github/v53/github/without_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v45/github/without_appengine.go rename to vendor/github.com/google/go-github/v53/github/without_appengine.go diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml deleted file mode 100644 index fc198d8827..0000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: go -go: - - 1.13.x - - 1.14.x - - 1.15.x - -env: - global: - - GO111MODULE=on - -script: - - make test - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md deleted file mode 100644 index 6eeb7e2dc3..0000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md +++ /dev/null @@ -1,51 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). - -Types of changes: -- `Added` for new features. -- `Changed` for changes in existing functionality. -- `Deprecated` for soon-to-be removed features. -- `Removed` for now removed features. -- `Fixed` for any bug fixes. -- `Security` in case of vulnerabilities. 
- -## [Unreleased] - -### Added - -- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f) - -## [v1.1.0] - 2019-09-12 -### Added -- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules. -- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs) -- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao) -- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad) -- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd) -- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok) -- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled - -### Deprecated -- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42) -- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of . - -### Fixed -- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst) -- Numerious documentation fixes. - -## v1.0.0 - 2018-05-08 -### Added -- grpc_auth -- grpc_ctxtags -- grpc_zap -- grpc_logrus -- grpc_opentracing -- grpc_retry -- grpc_validator -- grpc_recovery - -[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD -[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md index 814e155176..a12b40904b 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md @@ -7,16 +7,23 @@ [![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware) [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) [![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status) -[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE) +[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://gophers.slack.com/archives/CNJL30P4P) [gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities. +## âš ï¸ Status + +Version [v2](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2) is about to be released, with migration guide, which will replace v1. Try v2 and give us feedback! + +Version v1 is currently in deprecation mode, which means only critical and safety bug fixes will be merged. + + ## Middleware [gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for -Interceptors, i.e. 
[middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) +Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement -common patterns: auth, logging, message, validation, retries or monitoring. +common patterns: auth, logging, message, validation, retries, or monitoring. These are generic building blocks that make it easy to build multiple microservices easily. The purpose of this repository is to act as a go-to point for such reusable functionality. It contains @@ -29,57 +36,57 @@ import "github.com/grpc-ecosystem/go-grpc-middleware" myServer := grpc.NewServer( grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - grpc_recovery.StreamServerInterceptor(), grpc_ctxtags.StreamServerInterceptor(), grpc_opentracing.StreamServerInterceptor(), grpc_prometheus.StreamServerInterceptor, grpc_zap.StreamServerInterceptor(zapLogger), grpc_auth.StreamServerInterceptor(myAuthFunction), + grpc_recovery.StreamServerInterceptor(), )), grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - grpc_recovery.UnaryServerInterceptor(), grpc_ctxtags.UnaryServerInterceptor(), grpc_opentracing.UnaryServerInterceptor(), grpc_prometheus.UnaryServerInterceptor, grpc_zap.UnaryServerInterceptor(zapLogger), grpc_auth.UnaryServerInterceptor(myAuthFunction), + grpc_recovery.UnaryServerInterceptor(), )), ) ``` ## Interceptors -*Please send a PR to add new interceptors or middleware to this list* +_Please send a PR to add new interceptors or middleware to this list_ #### Auth - * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware + +- [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware #### Logging - * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body - * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers. - * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers. - * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers. - * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe). + +- [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body +- [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers. +- [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers. +- [`grpc_kit`](logging/kit/) - integration of [go-kit/log](https://github.com/go-kit/log) logging library into gRPC handlers. +- [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe). 
#### Monitoring - * [`grpc_prometheus`âš¡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware - * [`otgrpc`âš¡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors - * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags -#### Client - * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware +- [`grpc_prometheus`âš¡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware +- [`otgrpc`âš¡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors +- [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags +- [`otelgrpc`](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation/google.golang.org/grpc/otelgrpc) - [OpenTelemetry](https://opentelemetry.io/) client-side and server-side interceptors -#### Server - * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options - * [`grpc_recovery`](recovery/) - turn panics into gRPC errors - * [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter +#### Client +- [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware -## Status +#### Server -This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io). +- [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options +- [`grpc_recovery`](recovery/) - turn panics into gRPC errors +- [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter -Additional tooling will be added, and contributions are welcome. ## License diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go index ea3738b896..407d9332c9 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go @@ -16,22 +16,41 @@ import ( // Execution is done in left-to-right order, including passing of context. // For example ChainUnaryServer(one, two, three) will execute one before two before three, and three // will see context changes of one and two. +// +// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainUnaryInterceptor directly. func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { n := len(interceptors) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler { - return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) { - return currentInter(currentCtx, currentReq, info, currentHandler) - } + // Dummy interceptor maintained for backward compatibility to avoid returning nil. 
+ if n == 0 { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return handler(ctx, req) } + } - chainedHandler := handler - for i := n - 1; i >= 0; i-- { - chainedHandler = chainer(interceptors[i], chainedHandler) - } + // The degenerate case, just return the single wrapped interceptor directly. + if n == 1 { + return interceptors[0] + } - return chainedHandler(ctx, req) + // Return a function which satisfies the interceptor interface, and which is + // a closure over the given list of interceptors to be chained. + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + currHandler := handler + // Iterate backwards through all interceptors except the first (outermost). + // Wrap each one in a function which satisfies the handler interface, but + // is also a closure over the `info` and `handler` parameters. Then pass + // each pseudo-handler to the next outer interceptor as the handler to be called. + for i := n - 1; i > 0; i-- { + // Rebind to loop-local vars so they can be closed over. + innerHandler, i := currHandler, i + currHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) { + return interceptors[i](currentCtx, currentReq, info, innerHandler) + } + } + // Finally return the result of calling the outermost interceptor with the + // outermost pseudo-handler created above as its handler. + return interceptors[0](ctx, req, info, currHandler) } } @@ -40,22 +59,31 @@ func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnarySer // Execution is done in left-to-right order, including passing of context. // For example ChainUnaryServer(one, two, three) will execute one before two before three. // If you want to pass context between interceptors, use WrapServerStream. +// +// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainStreamInterceptor directly. func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor { n := len(interceptors) - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler { - return func(currentSrv interface{}, currentStream grpc.ServerStream) error { - return currentInter(currentSrv, currentStream, info, currentHandler) - } + // Dummy interceptor maintained for backward compatibility to avoid returning nil. 
+ if n == 0 { + return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return handler(srv, stream) } + } - chainedHandler := handler - for i := n - 1; i >= 0; i-- { - chainedHandler = chainer(interceptors[i], chainedHandler) - } + if n == 1 { + return interceptors[0] + } - return chainedHandler(srv, ss) + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + currHandler := handler + for i := n - 1; i > 0; i-- { + innerHandler, i := currHandler, i + currHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error { + return interceptors[i](currentSrv, currentStream, info, innerHandler) + } + } + return interceptors[0](srv, stream, info, currHandler) } } @@ -66,19 +94,26 @@ func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.Stream func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { n := len(interceptors) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker { - return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { - return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...) - } + // Dummy interceptor maintained for backward compatibility to avoid returning nil. + if n == 0 { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return invoker(ctx, method, req, reply, cc, opts...) } + } - chainedInvoker := invoker - for i := n - 1; i >= 0; i-- { - chainedInvoker = chainer(interceptors[i], chainedInvoker) - } + if n == 1 { + return interceptors[0] + } - return chainedInvoker(ctx, method, req, reply, cc, opts...) + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + currInvoker := invoker + for i := n - 1; i > 0; i-- { + innerInvoker, i := currInvoker, i + currInvoker = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { + return interceptors[i](currentCtx, currentMethod, currentReq, currentRepl, currentConn, innerInvoker, currentOpts...) + } + } + return interceptors[0](ctx, method, req, reply, cc, currInvoker, opts...) 
} } @@ -89,19 +124,26 @@ func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryCli func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor { n := len(interceptors) - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer { - return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) { - return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...) - } + // Dummy interceptor maintained for backward compatibility to avoid returning nil. + if n == 0 { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, opts...) } + } - chainedStreamer := streamer - for i := n - 1; i >= 0; i-- { - chainedStreamer = chainer(interceptors[i], chainedStreamer) - } + if n == 1 { + return interceptors[0] + } - return chainedStreamer(ctx, desc, cc, method, opts...) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + currStreamer := streamer + for i := n - 1; i > 0; i-- { + innerStreamer, i := currStreamer, i + currStreamer = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) { + return interceptors[i](currentCtx, currentDesc, currentConn, currentMethod, innerStreamer, currentOpts...) + } + } + return interceptors[0](ctx, desc, cc, method, currStreamer, opts...) } } @@ -109,12 +151,16 @@ func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.Stream // // WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors. // Basically syntactic sugar. +// +// Deprecated: use google.golang.org/grpc.ChainUnaryInterceptor instead. func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption { - return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...)) + return grpc.ChainUnaryInterceptor(interceptors...) } // WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors. // Basically syntactic sugar. +// +// Deprecated: use google.golang.org/grpc.ChainStreamInterceptor instead. func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption { - return grpc.StreamInterceptor(ChainStreamServer(interceptors...)) + return grpc.ChainStreamInterceptor(interceptors...) } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go index afd924a140..f8ba7198a5 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go @@ -18,7 +18,7 @@ Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC c linear backoff with 10% jitter. For chained interceptors, the retry interceptor will call every interceptor that follows it -whenever when a retry happens. 
+whenever a retry happens. Please see examples for more advanced use. */ diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go index 62d8312018..003bbd9066 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go @@ -5,8 +5,8 @@ package grpc_retry import ( "context" - "fmt" "io" + "strconv" "sync" "time" @@ -136,7 +136,6 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto type serverStreamingRetryingStream struct { grpc.ClientStream bufferedSends []interface{} // single message that the client can sen - receivedGood bool // indicates whether any prior receives were successful wasClosedSend bool // indicates that CloseSend was closed parentCtx context.Context callOpts *options @@ -209,17 +208,8 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { } func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { - s.mu.RLock() - wasGood := s.receivedGood - s.mu.RUnlock() err := s.getStream().RecvMsg(m) if err == nil || err == io.EOF { - s.mu.Lock() - s.receivedGood = true - s.mu.Unlock() - return false, err - } else if wasGood { - // previous RecvMsg in the stream succeeded, no retry logic should interfere return false, err } if isContextError(err) { @@ -303,7 +293,7 @@ func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) } if attempt > 0 && callOpts.includeHeader { - mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10)) ctx = mdClone.ToOutgoing(ctx) } return ctx diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go index 1c60585dd3..15225d710a 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc/metadata" ) -// NiceMD is a convenience wrapper definiting extra functions on the metadata. +// NiceMD is a convenience wrapper defining extra functions on the metadata. type NiceMD metadata.MD // ExtractIncoming extracts an inbound metadata from the server-side context. @@ -39,7 +39,7 @@ func ExtractOutgoing(ctx context.Context) NiceMD { // Clone performs a *deep* copy of the metadata.MD. // -// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed // all keys get copied. func (m NiceMD) Clone(copiedKeys ...string) NiceMD { newMd := NiceMD(metadata.Pairs()) @@ -61,7 +61,7 @@ func (m NiceMD) Clone(copiedKeys ...string) NiceMD { newMd[k] = make([]string, len(vv)) copy(newMd[k], vv) } - return NiceMD(newMd) + return newMd } // ToOutgoing sets the given NiceMD as a client-side context for dispatching. 
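The `retry/doc.go` text above notes that, by default, calls are retried on the `ResourceExhausted` and `Unavailable` codes with linear, jittered backoff, and that every interceptor chained after the retry interceptor is re-run whenever a retry happens. A rough configuration sketch follows; the `retryDialOptions` helper and the concrete limit, backoff, and timeout values are assumptions chosen for illustration, not values taken from this patch.

```go
package example

import (
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// retryDialOptions spells out, explicitly, the kind of policy described in
// retry/doc.go: retry a bounded number of times on ResourceExhausted and
// Unavailable, backing off linearly with jitter and capping each attempt.
func retryDialOptions() []grpc.DialOption {
	opts := []grpc_retry.CallOption{
		grpc_retry.WithMax(4),
		grpc_retry.WithBackoff(grpc_retry.BackoffLinearWithJitter(100*time.Millisecond, 0.10)),
		grpc_retry.WithCodes(codes.ResourceExhausted, codes.Unavailable),
		grpc_retry.WithPerRetryTimeout(time.Second),
	}
	return []grpc.DialOption{
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(opts...)),
		grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(opts...)),
	}
}
```

Because interceptors placed after the retry interceptor are re-invoked on every attempt while those placed before it see only the single logical call, its position in a chain determines whether logging and metrics record attempts or calls.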
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md new file mode 100644 index 0000000000..33686e4da8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md @@ -0,0 +1,9 @@ +## 0.7.4 (Jun 6, 2023) + +BUG FIXES + +- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 [GH-194] + +## 0.7.3 (May 15, 2023) + +Initial release diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS new file mode 100644 index 0000000000..f8389c995e --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS @@ -0,0 +1 @@ +* @hashicorp/release-engineering \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE index e87a115e46..f4f97ee585 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2015 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index adbdd92e3b..cad96bd97b 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package retryablehttp provides a familiar HTTP client interface with // automatic retries and exponential backoff. It is a thin wrapper over the // standard net/http client library and exposes nearly the same public API. @@ -69,11 +72,28 @@ var ( // scheme specified in the URL is invalid. This error isn't typed // specifically so we resort to matching on the error string. schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // A regular expression to match the error returned by net/http when the + // TLS certificate is not trusted. This error isn't typed + // specifically so we resort to matching on the error string. + notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) ) // ReaderFunc is the type of function that can be given natively to NewRequest type ReaderFunc func() (io.Reader, error) +// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it. +// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the +// CheckRetry function indicates that a retry of the base request is not necessary. +// If an error is returned from this function, the CheckRetry policy will be used to determine +// whether to retry the whole request (including this handler). +// +// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code. +// +// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or +// by the caller out-of-band. Failure to do so will result in a memory leak. +type ResponseHandlerFunc func(*http.Response) error + // LenReader is an interface implemented by many in-memory io.Reader's. Used // for automatically sending the right Content-Length header when possible. type LenReader interface { @@ -86,6 +106,8 @@ type Request struct { // used to rewind the request data in between retries. 
body ReaderFunc + responseHandler ResponseHandlerFunc + // Embed an HTTP request directly. This makes a *Request act exactly // like an *http.Request so that all meta methods are supported. *http.Request @@ -94,8 +116,16 @@ type Request struct { // WithContext returns wrapped Request with a shallow copy of underlying *http.Request // with its context changed to ctx. The provided ctx must be non-nil. func (r *Request) WithContext(ctx context.Context) *Request { - r.Request = r.Request.WithContext(ctx) - return r + return &Request{ + body: r.body, + responseHandler: r.responseHandler, + Request: r.Request.WithContext(ctx), + } +} + +// SetResponseHandler allows setting the response handler. +func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) { + r.responseHandler = fn } // BodyBytes allows accessing the request body. It is an analogue to @@ -230,10 +260,17 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro if err != nil { return nil, 0, err } - bodyReader = func() (io.Reader, error) { - return bytes.NewReader(buf), nil + if len(buf) == 0 { + bodyReader = func() (io.Reader, error) { + return http.NoBody, nil + } + contentLength = 0 + } else { + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) } - contentLength = int64(len(buf)) // No body provided, nothing to do case nil: @@ -252,23 +289,31 @@ func FromRequest(r *http.Request) (*Request, error) { return nil, err } // Could assert contentLength == r.ContentLength - return &Request{bodyReader, r}, nil + return &Request{body: bodyReader, Request: r}, nil } // NewRequest creates a new wrapped request. func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + return NewRequestWithContext(context.Background(), method, url, rawBody) +} + +// NewRequestWithContext creates a new wrapped request with the provided context. +// +// The context controls the entire lifetime of a request and its response: +// obtaining a connection, sending the request, and reading the response headers and body. +func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) if err != nil { return nil, err } - httpReq, err := http.NewRequest(method, url, nil) + httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) if err != nil { return nil, err } httpReq.ContentLength = contentLength - return &Request{bodyReader, httpReq}, nil + return &Request{body: bodyReader, Request: httpReq}, nil } // Logger interface allows to use other loggers than @@ -435,6 +480,9 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) { } // Don't retry if the error was due to TLS cert verification failure. + if notTrustedErrorRe.MatchString(v.Error()) { + return false, v + } if _, ok := v.Err.(x509.UnknownAuthorityError); ok { return false, v } @@ -455,7 +503,7 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) { // the server time to recover, as 500's are typically not permanent // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. 
- if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) } @@ -555,13 +603,12 @@ func (c *Client) Do(req *Request) (*http.Response, error) { var resp *http.Response var attempt int var shouldRetry bool - var doErr, checkErr error + var doErr, respErr, checkErr error for i := 0; ; i++ { + doErr, respErr = nil, nil attempt++ - var code int // HTTP response code - // Always rewind the request body when non-nil. if req.body != nil { body, err := req.body() @@ -589,19 +636,24 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // Attempt the request resp, doErr = c.HTTPClient.Do(req.Request) - if resp != nil { - code = resp.StatusCode - } // Check if we should continue with retries. shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) + if !shouldRetry && doErr == nil && req.responseHandler != nil { + respErr = req.responseHandler(resp) + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) + } - if doErr != nil { + err := doErr + if respErr != nil { + err = respErr + } + if err != nil { switch v := logger.(type) { case LeveledLogger: - v.Error("request failed", "error", doErr, "method", req.Method, "url", req.URL) + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) case Logger: - v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, doErr) + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) } } else { // Call this here to maintain the behavior of logging all requests, @@ -636,11 +688,11 @@ func (c *Client) Do(req *Request) (*http.Response, error) { } wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) - desc := fmt.Sprintf("%s %s", req.Method, req.URL) - if code > 0 { - desc = fmt.Sprintf("%s (status: %d)", desc, code) - } if logger != nil { + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if resp != nil { + desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) + } switch v := logger.(type) { case LeveledLogger: v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) @@ -648,11 +700,13 @@ func (c *Client) Do(req *Request) (*http.Response, error) { v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) } } + timer := time.NewTimer(wait) select { case <-req.Context().Done(): + timer.Stop() c.HTTPClient.CloseIdleConnections() return nil, req.Context().Err() - case <-time.After(wait): + case <-timer.C: } // Make shallow copy of http Request so that we can modify its body @@ -662,15 +716,19 @@ func (c *Client) Do(req *Request) (*http.Response, error) { } // this is the closest we have to success criteria - if doErr == nil && checkErr == nil && !shouldRetry { + if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { return resp, nil } defer c.HTTPClient.CloseIdleConnections() - err := doErr + var err error if checkErr != nil { err = checkErr + } else if respErr != nil { + err = respErr + } else { + err = doErr } if c.ErrorHandler != nil { diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go index 8f3ee35842..8c407adb3b 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package retryablehttp import ( diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 0000000000..0a1ff9f94d --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! â¤ï¸ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). + +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). 
If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. 
+ + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 7e6f7aeee8..ffbbb62c70 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,17 +1,20 @@ # Mergo - -[![GoDoc][3]][4] [![GitHub release][5]][6] [![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] +[![Test status][1]][2] +[![OpenSSF Scorecard][21]][22] +[![OpenSSF Best Practices][19]][20] +[![Coverage status][9]][10] [![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] +[![FOSSA status][13]][14] + +[![GoDoc][3]][4] [![Become my sponsor][15]][16] +[![Tidelift][17]][18] -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo +[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master +[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://img.shields.io/github/release/imdario/mergo.svg @@ -26,6 +29,12 @@ [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield [15]: https://img.shields.io/github/sponsors/imdario [16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo +[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge +[20]: https://bestpractices.coreinfrastructure.org/projects/7177 +[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge +[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -55,7 +64,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont ### Mergo in the wild -- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -231,5 +239,4 @@ Written by [Dario Castañé](http://dario.im). [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 0000000000..a5de61f77b --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index a13a7ee46c..b50d5c2a4e 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } } // Remember, remember... - visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } zeroValue := reflect.Value{} switch dst.Kind() { @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } @@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { func _map(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8b4e2f47a0..0ef9b2138c 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool { } type Config struct { + Transformers Transformers Overwrite bool + ShouldNotDereference bool AppendSlice bool TypeCheck bool - Transformers Transformers overwriteWithEmptyValue bool overwriteSliceWithEmptyValue bool sliceDeepCopy bool @@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} + visited[h] = &visit{typ, seen, addr} } if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { @@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { dst.Set(src) } } @@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if src.Kind() != reflect.Map { - if overwrite { + if overwrite && dst.CanSet() { dst.Set(src) } return @@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { if typeCheck && srcSlice.Type() != dstSlice.Type() { return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } @@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } } - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + + // Ensure that all keys in dst are deleted if they are not in src. 
+ if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } case reflect.Slice: if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if src.Kind() != reflect.Interface { if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } } } else if dst.Elem().Type() == src.Type() { if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { @@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { dst.Set(src) } break @@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co break } default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) if mustSet { if dst.CanSet() { dst.Set(src) @@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) { config.overwriteSliceWithEmptyValue = true } +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + // WithAppendSlice will make merge append slices instead of overwriting it. 
func WithAppendSlice(config *Config) { config.AppendSlice = true @@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) { func merge(dst, src interface{}, opts ...func(*Config)) error { if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument + return ErrNonPointerArgument } var ( vDst, vSrc reflect.Value diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 9fe362d476..0a721e2d85 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,7 +20,7 @@ var ( ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") + ErrNonPointerArgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -28,13 +28,13 @@ var ( // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { - ptr uintptr typ reflect.Type next *visit + ptr uintptr } // From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 @@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool { if v.IsNil() { return true } - return isEmptyValue(v.Elem()) + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false case reflect.Func: return v.IsNil() case reflect.Invalid: diff --git a/vendor/github.com/ktrysmt/go-bitbucket/bitbucket.go b/vendor/github.com/ktrysmt/go-bitbucket/bitbucket.go index aae77e6610..8f13b11529 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/bitbucket.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/bitbucket.go @@ -139,8 +139,10 @@ type pipelines interface { } type RepositoriesOptions struct { - Owner string `json:"owner"` - Role string `json:"role"` // role=[owner|admin|contributor|member] + Owner string `json:"owner"` + Role string `json:"role"` // role=[owner|admin|contributor|member] + Page *int `json:"page"` + Keyword *string `json:"keyword"` } type RepositoryOptions struct { @@ -339,6 +341,7 @@ type CommitsOptions struct { Include string `json:"include"` Exclude string `json:"exclude"` CommentID string `json:"comment_id"` + Page *int `json:"page"` } type CommitStatusOptions struct { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/branchrestrictions.go b/vendor/github.com/ktrysmt/go-bitbucket/branchrestrictions.go index 72f9eb3e41..09e8c73988 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/branchrestrictions.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/branchrestrictions.go @@ -17,7 +17,7 @@ type BranchRestrictions struct { func (b *BranchRestrictions) Gets(bo *BranchRestrictionsOptions) (interface{}, error) { urlStr := b.c.requestUrl("/repositories/%s/%s/branch-restrictions", bo.Owner, bo.RepoSlug) - return b.c.executePaginated("GET", urlStr, "") + return b.c.executePaginated("GET", urlStr, "", nil) } func (b *BranchRestrictions) Create(bo *BranchRestrictionsOptions) (*BranchRestrictions, error) { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/client.go b/vendor/github.com/ktrysmt/go-bitbucket/client.go index 13f51b5a01..211341e60f 100644 --- 
a/vendor/github.com/ktrysmt/go-bitbucket/client.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/client.go @@ -249,7 +249,7 @@ func (c *Client) execute(method string, urlStr string, text string) (interface{} return result, nil } -func (c *Client) executePaginated(method string, urlStr string, text string) (interface{}, error) { +func (c *Client) executePaginated(method string, urlStr string, text string, page *int) (interface{}, error) { if c.Pagelen != DEFAULT_PAGE_LENGTH { urlObj, err := url.Parse(urlStr) if err != nil { @@ -271,7 +271,7 @@ func (c *Client) executePaginated(method string, urlStr string, text string) (in } c.authenticateRequest(req) - result, err := c.doPaginatedRequest(req, false) + result, err := c.doPaginatedRequest(req, page, false) if err != nil { return nil, err } @@ -358,7 +358,19 @@ func (c *Client) doRequest(req *http.Request, emptyResponse bool) (interface{}, return result, nil } -func (c *Client) doPaginatedRequest(req *http.Request, emptyResponse bool) (interface{}, error) { +func (c *Client) doPaginatedRequest(req *http.Request, page *int, emptyResponse bool) (interface{}, error) { + disableAutoPaging := c.DisableAutoPaging + curPage := 1 + if page != nil { + disableAutoPaging = true + curPage = *page + q := req.URL.Query() + q.Set("page", strconv.Itoa(curPage)) + req.URL.RawQuery = q.Encode() + } + // q.Encode() does not encode "~". + req.URL.RawQuery = strings.ReplaceAll(req.URL.RawQuery, "~", "%7E") + resBody, err := c.doRawRequest(req, emptyResponse) if err != nil { return nil, err @@ -375,18 +387,15 @@ func (c *Client) doPaginatedRequest(req *http.Request, emptyResponse bool) (inte } responsePaginated := &Response{} - var curPage int - err = json.Unmarshal(responseBytes, responsePaginated) if err == nil && len(responsePaginated.Values) > 0 { - var values []interface{} + values := responsePaginated.Values for { - curPage++ - values = append(values, responsePaginated.Values...) - if c.DisableAutoPaging || len(responsePaginated.Next) == 0 || + if disableAutoPaging || responsePaginated.Next == "" || (curPage >= c.LimitPages && c.LimitPages != 0) { break } + curPage++ newReq, err := http.NewRequest(req.Method, responsePaginated.Next, nil) if err != nil { return resBody, err @@ -399,6 +408,7 @@ func (c *Client) doPaginatedRequest(req *http.Request, emptyResponse bool) (inte responsePaginated = &Response{} json.NewDecoder(resp).Decode(responsePaginated) + values = append(values, responsePaginated.Values...) 
} responsePaginated.Values = values responseBytes, err = json.Marshal(responsePaginated) diff --git a/vendor/github.com/ktrysmt/go-bitbucket/commits.go b/vendor/github.com/ktrysmt/go-bitbucket/commits.go index 1243a7c5d6..166a062914 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/commits.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/commits.go @@ -12,7 +12,7 @@ type Commits struct { func (cm *Commits) GetCommits(cmo *CommitsOptions) (interface{}, error) { urlStr := cm.c.requestUrl("/repositories/%s/%s/commits/%s", cmo.Owner, cmo.RepoSlug, cmo.Branchortag) urlStr += cm.buildCommitsQuery(cmo.Include, cmo.Exclude) - return cm.c.executePaginated("GET", urlStr, "") + return cm.c.executePaginated("GET", urlStr, "", cmo.Page) } func (cm *Commits) GetCommit(cmo *CommitsOptions) (interface{}, error) { @@ -22,7 +22,7 @@ func (cm *Commits) GetCommit(cmo *CommitsOptions) (interface{}, error) { func (cm *Commits) GetCommitComments(cmo *CommitsOptions) (interface{}, error) { urlStr := cm.c.requestUrl("/repositories/%s/%s/commit/%s/comments", cmo.Owner, cmo.RepoSlug, cmo.Revision) - return cm.c.executePaginated("GET", urlStr, "") + return cm.c.executePaginated("GET", urlStr, "", nil) } func (cm *Commits) GetCommitComment(cmo *CommitsOptions) (interface{}, error) { @@ -32,7 +32,7 @@ func (cm *Commits) GetCommitComment(cmo *CommitsOptions) (interface{}, error) { func (cm *Commits) GetCommitStatuses(cmo *CommitsOptions) (interface{}, error) { urlStr := cm.c.requestUrl("/repositories/%s/%s/commit/%s/statuses", cmo.Owner, cmo.RepoSlug, cmo.Revision) - return cm.c.executePaginated("GET", urlStr, "") + return cm.c.executePaginated("GET", urlStr, "", nil) } func (cm *Commits) GetCommitStatus(cmo *CommitsOptions, commitStatusKey string) (interface{}, error) { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/downloads.go b/vendor/github.com/ktrysmt/go-bitbucket/downloads.go index 4d82c94bbc..b8b9939e01 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/downloads.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/downloads.go @@ -11,5 +11,5 @@ func (dl *Downloads) Create(do *DownloadsOptions) (interface{}, error) { func (dl *Downloads) List(do *DownloadsOptions) (interface{}, error) { urlStr := dl.c.requestUrl("/repositories/%s/%s/downloads", do.Owner, do.RepoSlug) - return dl.c.executePaginated("GET", urlStr, "") + return dl.c.executePaginated("GET", urlStr, "", nil) } diff --git a/vendor/github.com/ktrysmt/go-bitbucket/issues.go b/vendor/github.com/ktrysmt/go-bitbucket/issues.go index d5987c1451..d63cb3ebed 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/issues.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/issues.go @@ -37,7 +37,7 @@ func (p *Issues) Gets(io *IssuesOptions) (interface{}, error) { url.RawQuery = query.Encode() } - return p.c.executePaginated("GET", url.String(), "") + return p.c.executePaginated("GET", url.String(), "", nil) } func (p *Issues) Get(io *IssuesOptions) (interface{}, error) { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/pipelines.go b/vendor/github.com/ktrysmt/go-bitbucket/pipelines.go index a54ea35c53..9588e4a16d 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/pipelines.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/pipelines.go @@ -46,7 +46,7 @@ func (p *Pipelines) List(po *PipelinesOptions) (interface{}, error) { urlStr = parsed.String() } - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *Pipelines) Get(po *PipelinesOptions) (interface{}, error) { @@ -90,7 +90,7 @@ func (p *Pipelines) 
ListSteps(po *PipelinesOptions) (interface{}, error) { urlStr = parsed.String() } - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *Pipelines) GetStep(po *PipelinesOptions) (interface{}, error) { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/pullrequests.go b/vendor/github.com/ktrysmt/go-bitbucket/pullrequests.go index 10ca289db7..55ec5b7d9e 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/pullrequests.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/pullrequests.go @@ -65,7 +65,7 @@ func (p *PullRequests) Gets(po *PullRequestsOptions) (interface{}, error) { urlStr = parsed.String() } - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *PullRequests) Get(po *PullRequestsOptions) (interface{}, error) { @@ -75,7 +75,7 @@ func (p *PullRequests) Get(po *PullRequestsOptions) (interface{}, error) { func (p *PullRequests) Activities(po *PullRequestsOptions) (interface{}, error) { urlStr := p.c.GetApiBaseURL() + "/repositories/" + po.Owner + "/" + po.RepoSlug + "/pullrequests/activity" - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *PullRequests) Activity(po *PullRequestsOptions) (interface{}, error) { @@ -85,7 +85,7 @@ func (p *PullRequests) Activity(po *PullRequestsOptions) (interface{}, error) { func (p *PullRequests) Commits(po *PullRequestsOptions) (interface{}, error) { urlStr := p.c.GetApiBaseURL() + "/repositories/" + po.Owner + "/" + po.RepoSlug + "/pullrequests/" + po.ID + "/commits" - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *PullRequests) Patch(po *PullRequestsOptions) (interface{}, error) { @@ -158,7 +158,7 @@ func (p *PullRequests) UpdateComment(co *PullRequestCommentOptions) (interface{} func (p *PullRequests) GetComments(po *PullRequestsOptions) (interface{}, error) { urlStr := p.c.GetApiBaseURL() + "/repositories/" + po.Owner + "/" + po.RepoSlug + "/pullrequests/" + po.ID + "/comments/" - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *PullRequests) GetComment(po *PullRequestsOptions) (interface{}, error) { @@ -189,7 +189,7 @@ func (p *PullRequests) Statuses(po *PullRequestsOptions) (interface{}, error) { parsed.RawQuery = query.Encode() urlStr = parsed.String() } - return p.c.executePaginated("GET", urlStr, "") + return p.c.executePaginated("GET", urlStr, "", nil) } func (p *PullRequests) buildPullRequestBody(po *PullRequestsOptions) (string, error) { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/repositories.go b/vendor/github.com/ktrysmt/go-bitbucket/repositories.go index 01a4e44faa..4bd6577496 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/repositories.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/repositories.go @@ -3,8 +3,6 @@ package bitbucket import ( "errors" "fmt" - - "github.com/mitchellh/mapstructure" ) //"github.com/k0kubun/pp" @@ -32,15 +30,22 @@ type RepositoriesRes struct { } func (r *Repositories) ListForAccount(ro *RepositoriesOptions) (*RepositoriesRes, error) { - url := "/repositories" + urlPath := "/repositories" if ro.Owner != "" { - url += fmt.Sprintf("/%s", ro.Owner) + urlPath += fmt.Sprintf("/%s", ro.Owner) } - urlStr := r.c.requestUrl(url) + urlStr := r.c.requestUrl(urlPath) if ro.Role != "" { urlStr += "?role=" + ro.Role } - repos, err := r.c.executePaginated("GET", urlStr, "") + if ro.Keyword != nil && *ro.Keyword 
!= "" { + if ro.Role == "" { + urlStr += "?" + } + // https://developer.atlassian.com/cloud/bitbucket/rest/intro/#operators + urlStr += fmt.Sprintf("q=full_name ~ \"%s\"", *ro.Keyword) + } + repos, err := r.c.executePaginated("GET", urlStr, "", ro.Page) if err != nil { return nil, err } @@ -54,7 +59,7 @@ func (r *Repositories) ListForTeam(ro *RepositoriesOptions) (*RepositoriesRes, e func (r *Repositories) ListPublic() (*RepositoriesRes, error) { urlStr := r.c.requestUrl("/repositories/") - repos, err := r.c.executePaginated("GET", urlStr, "") + repos, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -70,10 +75,9 @@ func decodeRepositories(reposResponse interface{}) (*RepositoriesRes, error) { repoArray := reposResponseMap["values"].([]interface{}) var repos []Repository for _, repoEntry := range repoArray { - var repo Repository - err := mapstructure.Decode(repoEntry, &repo) + repo, err := decodeRepository(repoEntry) if err == nil { - repos = append(repos, repo) + repos = append(repos, *repo) } } diff --git a/vendor/github.com/ktrysmt/go-bitbucket/repository.go b/vendor/github.com/ktrysmt/go-bitbucket/repository.go index 5f8f74e8e0..4ec24f9660 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/repository.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/repository.go @@ -9,6 +9,7 @@ import ( "path" "strconv" "strings" + "time" "github.com/mitchellh/mapstructure" ) @@ -29,11 +30,15 @@ type Repository struct { Has_wiki bool Mainbranch RepositoryBranch Type string - CreatedOn string `mapstructure:"created_on"` - UpdatedOn string `mapstructure:"updated_on"` - Owner map[string]interface{} - Links map[string]interface{} - Parent *Repository + // Deprecated: CreatedOn is deprecated use CreatedOnTime + CreatedOn string `mapstructure:"created_on"` + // Deprecated: UpdatedOn is deprecated use UpdatedOnTime + UpdatedOn string `mapstructure:"updated_on"` + Owner map[string]interface{} + Links map[string]interface{} + Parent *Repository + CreatedOnTime *time.Time `mapstructure:"created_on"` + UpdatedOnTime *time.Time `mapstructure:"updated_on"` } type RepositoryFile struct { @@ -249,6 +254,8 @@ type UserPermissions struct { UserPermissions []UserPermission } +var stringToTimeHookFunc = mapstructure.StringToTimeHookFunc("2006-01-02T15:04:05.000000+00:00") + func (r *Repository) Create(ro *RepositoryOptions) (*Repository, error) { data, err := r.buildRepositoryBody(ro) if err != nil { @@ -324,7 +331,7 @@ func (r *Repository) ListFiles(ro *RepositoryFilesOptions) ([]RepositoryFile, er return nil, err } - response, err := r.c.executePaginated("GET", urlStr, "") + response, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -522,7 +529,7 @@ func (r *Repository) ListTags(rbo *RepositoryTagOptions) (*RepositoryTags, error } urlStr := r.c.requestUrl("/repositories/%s/%s/refs/tags?%s", rbo.Owner, rbo.RepoSlug, params.Encode()) - response, err := r.c.executePaginated("GET", urlStr, "") + response, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -578,18 +585,18 @@ func (r *Repository) Delete(ro *RepositoryOptions) (interface{}, error) { func (r *Repository) ListWatchers(ro *RepositoryOptions) (interface{}, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/watchers", ro.Owner, ro.RepoSlug) - return r.c.executePaginated("GET", urlStr, "") + return r.c.executePaginated("GET", urlStr, "", nil) } func (r *Repository) ListForks(ro *RepositoryOptions) (interface{}, error) { urlStr := 
r.c.requestUrl("/repositories/%s/%s/forks", ro.Owner, ro.RepoSlug) - return r.c.executePaginated("GET", urlStr, "") + return r.c.executePaginated("GET", urlStr, "", nil) } func (r *Repository) ListDefaultReviewers(ro *RepositoryOptions) (*DefaultReviewers, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/default-reviewers?pagelen=1", ro.Owner, ro.RepoSlug) - res, err := r.c.executePaginated("GET", urlStr, "") + res, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -893,7 +900,7 @@ func (r *Repository) UpdateDeploymentVariable(opt *RepositoryDeploymentVariableO func (r *Repository) ListGroupPermissions(ro *RepositoryOptions) (*GroupPermissions, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/permissions-config/groups?pagelen=1", ro.Owner, ro.RepoSlug) - res, err := r.c.executePaginated("GET", urlStr, "") + res, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -924,7 +931,7 @@ func (r *Repository) DeleteGroupPermissions(rgo *RepositoryGroupPermissionsOptio func (r *Repository) GetGroupPermissions(rgo *RepositoryGroupPermissionsOptions) (*GroupPermission, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/permissions-config/groups/%s", rgo.Owner, rgo.RepoSlug, rgo.Group) - res, err := r.c.executePaginated("GET", urlStr, "") + res, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -934,7 +941,7 @@ func (r *Repository) GetGroupPermissions(rgo *RepositoryGroupPermissionsOptions) func (r *Repository) ListUserPermissions(ro *RepositoryOptions) (*UserPermissions, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/permissions-config/users?pagelen=1", ro.Owner, ro.RepoSlug) - res, err := r.c.executePaginated("GET", urlStr, "") + res, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -965,7 +972,7 @@ func (r *Repository) DeleteUserPermissions(rgo *RepositoryUserPermissionsOptions func (r *Repository) GetUserPermissions(rgo *RepositoryUserPermissionsOptions) (*UserPermission, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/permissions-config/users/%s", rgo.Owner, rgo.RepoSlug, rgo.User) - res, err := r.c.executePaginated("GET", urlStr, "") + res, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -1190,7 +1197,15 @@ func decodeRepository(repoResponse interface{}) (*Repository, error) { } var repository = new(Repository) - err := mapstructure.Decode(repoMap, repository) + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Metadata: nil, + Result: repository, + DecodeHook: stringToTimeHookFunc, + }) + if err != nil { + return nil, err + } + err = decoder.Decode(repoMap) if err != nil { return nil, err } diff --git a/vendor/github.com/ktrysmt/go-bitbucket/webhooks.go b/vendor/github.com/ktrysmt/go-bitbucket/webhooks.go index 3614418aaf..3c1aa3375c 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/webhooks.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/webhooks.go @@ -74,7 +74,7 @@ func (r *Webhooks) buildWebhooksBody(ro *WebhooksOptions) (string, error) { func (r *Webhooks) List(ro *WebhooksOptions) ([]Webhook, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/hooks/", ro.Owner, ro.RepoSlug) - res, err := r.c.executePaginated("GET", urlStr, "") + res, err := r.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -84,7 +84,7 @@ func (r *Webhooks) List(ro *WebhooksOptions) ([]Webhook, error) { // Deprecate Gets for List call func (r 
*Webhooks) Gets(ro *WebhooksOptions) (interface{}, error) { urlStr := r.c.requestUrl("/repositories/%s/%s/hooks/", ro.Owner, ro.RepoSlug) - return r.c.executePaginated("GET", urlStr, "") + return r.c.executePaginated("GET", urlStr, "", nil) } func (r *Webhooks) Create(ro *WebhooksOptions) (*Webhook, error) { diff --git a/vendor/github.com/ktrysmt/go-bitbucket/workspaces.go b/vendor/github.com/ktrysmt/go-bitbucket/workspaces.go index e9b17adb53..4c27afbd92 100644 --- a/vendor/github.com/ktrysmt/go-bitbucket/workspaces.go +++ b/vendor/github.com/ktrysmt/go-bitbucket/workspaces.go @@ -52,7 +52,7 @@ type WorkspaceMembers struct { func (t *Permission) GetUserPermissions(organization, member string) (*Permission, error) { urlStr := t.c.requestUrl("/workspaces/%s/permissions?q=user.nickname=\"%s\"", organization, member) - response, err := t.c.executePaginated("GET", urlStr, "") + response, err := t.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -62,7 +62,7 @@ func (t *Permission) GetUserPermissions(organization, member string) (*Permissio func (t *Permission) GetUserPermissionsByUuid(organization, member string) (*Permission, error) { urlStr := t.c.requestUrl("/workspaces/%s/permissions?q=user.uuid=\"%s\"", organization, member) - response, err := t.c.executePaginated("GET", urlStr, "") + response, err := t.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -72,7 +72,7 @@ func (t *Permission) GetUserPermissionsByUuid(organization, member string) (*Per func (t *Workspace) List() (*WorkspaceList, error) { urlStr := t.c.requestUrl("/workspaces") - response, err := t.c.executePaginated("GET", urlStr, "") + response, err := t.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (t *Workspace) Get(workspace string) (*Workspace, error) { func (w *Workspace) Members(teamname string) (*WorkspaceMembers, error) { urlStr := w.c.requestUrl("/workspaces/%s/members", teamname) - response, err := w.c.executePaginated("GET", urlStr, "") + response, err := w.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } @@ -102,7 +102,7 @@ func (w *Workspace) Members(teamname string) (*WorkspaceMembers, error) { func (w *Workspace) Projects(teamname string) (*ProjectsRes, error) { urlStr := w.c.requestUrl("/workspaces/%s/projects/", teamname) - response, err := w.c.executePaginated("GET", urlStr, "") + response, err := w.c.executePaginated("GET", urlStr, "", nil) if err != nil { return nil, err } diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 0000000000..9fdc20fdb6 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 0000000000..e628920460 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,68 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. 
+ AnnotationArtifactDescription = "org.opencontainers.artifact.description" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 0000000000..36b0aeb8f1 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,111 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` + + // ArgsEscaped + // + // Deprecated: This field is present only for legacy compatibility with + // Docker and should not be used by new image builders. It is used by Docker + // for Windows images to indicate that the `Entrypoint` or `Cmd` or both, + // contains only a single element array, that is a pre-escaped, and combined + // into a single string `CommandLine`. If `true` the value in `Entrypoint` or + // `Cmd` should be used as-is to avoid double escaping. + // https://github.com/opencontainers/image-spec/pull/892 + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. +type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. 
+ Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + Platform + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 0000000000..9654aa5af6 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,72 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. 
+ Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 0000000000..ed4a56e59e --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 0000000000..fc79e9e0d1 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 0000000000..4ce7b54ccd --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,49 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// ScratchDescriptor is the descriptor of a blob with content of `{}`. 
+var ScratchDescriptor = Descriptor{ + MediaType: MediaTypeScratch, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 0000000000..5dd31255eb --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. + // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + // + // Deprecated: Non-distributable layers are deprecated, and not recommended + // for future use. Implementations SHOULD NOT produce new non-distributable + // layers. 
+ // https://github.com/opencontainers/image-spec/pull/965 + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` + MediaTypeScratch = "application/vnd.oci.scratch.v1+json" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go new file mode 100644 index 0000000000..3d4119b441 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +import "fmt" + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 1 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "-rc.3" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go new file mode 100644 index 0000000000..58a1510f33 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. 
+type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go index f74139c71f..1cfe8d863c 100644 --- a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -35,11 +35,15 @@ import ( ) func init() { - json.RegisterTypeEncoderFunc("model.SamplePair", marshalPointJSON, marshalPointJSONIsEmpty) - json.RegisterTypeDecoderFunc("model.SamplePair", unMarshalPointJSON) + json.RegisterTypeEncoderFunc("model.SamplePair", marshalSamplePairJSON, marshalJSONIsEmpty) + json.RegisterTypeDecoderFunc("model.SamplePair", unmarshalSamplePairJSON) + json.RegisterTypeEncoderFunc("model.SampleHistogramPair", marshalSampleHistogramPairJSON, marshalJSONIsEmpty) + json.RegisterTypeDecoderFunc("model.SampleHistogramPair", unmarshalSampleHistogramPairJSON) + json.RegisterTypeEncoderFunc("model.SampleStream", marshalSampleStreamJSON, marshalJSONIsEmpty) // Only needed for benchmark. + json.RegisterTypeDecoderFunc("model.SampleStream", unmarshalSampleStreamJSON) // Only needed for benchmark. } -func unMarshalPointJSON(ptr unsafe.Pointer, iter *json.Iterator) { +func unmarshalSamplePairJSON(ptr unsafe.Pointer, iter *json.Iterator) { p := (*model.SamplePair)(ptr) if !iter.ReadArray() { iter.ReportError("unmarshal model.SamplePair", "SamplePair must be [timestamp, value]") @@ -68,12 +72,165 @@ func unMarshalPointJSON(ptr unsafe.Pointer, iter *json.Iterator) { } } -func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) { +func marshalSamplePairJSON(ptr unsafe.Pointer, stream *json.Stream) { p := *((*model.SamplePair)(ptr)) stream.WriteArrayStart() + marshalTimestamp(p.Timestamp, stream) + stream.WriteMore() + marshalFloat(float64(p.Value), stream) + stream.WriteArrayEnd() +} + +func unmarshalSampleHistogramPairJSON(ptr unsafe.Pointer, iter *json.Iterator) { + p := (*model.SampleHistogramPair)(ptr) + if !iter.ReadArray() { + iter.ReportError("unmarshal model.SampleHistogramPair", "SampleHistogramPair must be [timestamp, {histogram}]") + return + } + t := iter.ReadNumber() + if err := p.Timestamp.UnmarshalJSON([]byte(t)); err != nil { + iter.ReportError("unmarshal model.SampleHistogramPair", err.Error()) + return + } + if !iter.ReadArray() { + iter.ReportError("unmarshal model.SampleHistogramPair", "SamplePair missing histogram") + return + } + h := &model.SampleHistogram{} + p.Histogram = h + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + switch key { + case "count": + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + iter.ReportError("unmarshal model.SampleHistogramPair", "count of histogram is not a float") + return + } + h.Count = model.FloatString(f) + case "sum": + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + iter.ReportError("unmarshal model.SampleHistogramPair", "sum of histogram is not a float") + return + } + h.Sum = model.FloatString(f) + case "buckets": + for iter.ReadArray() { + b, err := unmarshalHistogramBucket(iter) + if err != nil { + iter.ReportError("unmarshal model.HistogramBucket", err.Error()) + return + } + h.Buckets = append(h.Buckets, b) + } + default: + iter.ReportError("unmarshal model.SampleHistogramPair", fmt.Sprint("unexpected key in histogram:", key)) + return + } + } + if iter.ReadArray() { + 
iter.ReportError("unmarshal model.SampleHistogramPair", "SampleHistogramPair has too many values, must be [timestamp, {histogram}]") + return + } +} + +func marshalSampleHistogramPairJSON(ptr unsafe.Pointer, stream *json.Stream) { + p := *((*model.SampleHistogramPair)(ptr)) + stream.WriteArrayStart() + marshalTimestamp(p.Timestamp, stream) + stream.WriteMore() + marshalHistogram(*p.Histogram, stream) + stream.WriteArrayEnd() +} + +func unmarshalSampleStreamJSON(ptr unsafe.Pointer, iter *json.Iterator) { + ss := (*model.SampleStream)(ptr) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + switch key { + case "metric": + metricString := iter.ReadAny().ToString() + if err := json.UnmarshalFromString(metricString, &ss.Metric); err != nil { + iter.ReportError("unmarshal model.SampleStream", err.Error()) + return + } + case "values": + for iter.ReadArray() { + v := model.SamplePair{} + unmarshalSamplePairJSON(unsafe.Pointer(&v), iter) + ss.Values = append(ss.Values, v) + } + case "histograms": + for iter.ReadArray() { + h := model.SampleHistogramPair{} + unmarshalSampleHistogramPairJSON(unsafe.Pointer(&h), iter) + ss.Histograms = append(ss.Histograms, h) + } + default: + iter.ReportError("unmarshal model.SampleStream", fmt.Sprint("unexpected key:", key)) + return + } + } +} + +func marshalSampleStreamJSON(ptr unsafe.Pointer, stream *json.Stream) { + ss := *((*model.SampleStream)(ptr)) + stream.WriteObjectStart() + stream.WriteObjectField(`metric`) + m, err := json.ConfigCompatibleWithStandardLibrary.Marshal(ss.Metric) + if err != nil { + stream.Error = err + return + } + stream.SetBuffer(append(stream.Buffer(), m...)) + if len(ss.Values) > 0 { + stream.WriteMore() + stream.WriteObjectField(`values`) + stream.WriteArrayStart() + for i, v := range ss.Values { + if i > 0 { + stream.WriteMore() + } + marshalSamplePairJSON(unsafe.Pointer(&v), stream) + } + stream.WriteArrayEnd() + } + if len(ss.Histograms) > 0 { + stream.WriteMore() + stream.WriteObjectField(`histograms`) + stream.WriteArrayStart() + for i, h := range ss.Histograms { + if i > 0 { + stream.WriteMore() + } + marshalSampleHistogramPairJSON(unsafe.Pointer(&h), stream) + } + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +func marshalFloat(v float64, stream *json.Stream) { + stream.WriteRaw(`"`) + // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround + // to https://github.com/json-iterator/go/issues/365 (json-iterator, to follow json standard, doesn't allow inf/nan). + buf := stream.Buffer() + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + buf = strconv.AppendFloat(buf, v, fmt, -1, 64) + stream.SetBuffer(buf) + stream.WriteRaw(`"`) +} + +func marshalTimestamp(timestamp model.Time, stream *json.Stream) { + t := int64(timestamp) // Write out the timestamp as a float divided by 1000. // This is ~3x faster than converting to a float. 
- t := int64(p.Timestamp) if t < 0 { stream.WriteRaw(`-`) t = -t @@ -90,28 +247,113 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) { } stream.WriteInt64(fraction) } - stream.WriteMore() - stream.WriteRaw(`"`) +} - // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround - // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan) - buf := stream.Buffer() - abs := math.Abs(float64(p.Value)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } +func unmarshalHistogramBucket(iter *json.Iterator) (*model.HistogramBucket, error) { + b := model.HistogramBucket{} + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") } - buf = strconv.AppendFloat(buf, float64(p.Value), fmt, -1, 64) - stream.SetBuffer(buf) + boundaries, err := iter.ReadNumber().Int64() + if err != nil { + return nil, err + } + b.Boundaries = int32(boundaries) + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + f, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + return nil, err + } + b.Lower = model.FloatString(f) + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + f, err = strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + return nil, err + } + b.Upper = model.FloatString(f) + if !iter.ReadArray() { + return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]") + } + f, err = strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + return nil, err + } + b.Count = model.FloatString(f) + if iter.ReadArray() { + return nil, errors.New("HistogramBucket has too many values, must be [boundaries, lower, upper, count]") + } + return &b, nil +} - stream.WriteRaw(`"`) +// marshalHistogramBucket writes something like: [ 3, "-0.25", "0.25", "3"] +// See marshalHistogram to understand what the numbers mean +func marshalHistogramBucket(b model.HistogramBucket, stream *json.Stream) { + stream.WriteArrayStart() + stream.WriteInt32(b.Boundaries) + stream.WriteMore() + marshalFloat(float64(b.Lower), stream) + stream.WriteMore() + marshalFloat(float64(b.Upper), stream) + stream.WriteMore() + marshalFloat(float64(b.Count), stream) stream.WriteArrayEnd() } -func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { +// marshalHistogram writes something like: +// +// { +// "count": "42", +// "sum": "34593.34", +// "buckets": [ +// [ 3, "-0.25", "0.25", "3"], +// [ 0, "0.25", "0.5", "12"], +// [ 0, "0.5", "1", "21"], +// [ 0, "2", "4", "6"] +// ] +// } +// +// The 1st element in each bucket array determines if the boundaries are +// inclusive (AKA closed) or exclusive (AKA open): +// +// 0: lower exclusive, upper inclusive +// 1: lower inclusive, upper exclusive +// 2: both exclusive +// 3: both inclusive +// +// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is +// the bucket count. 
+func marshalHistogram(h model.SampleHistogram, stream *json.Stream) { + stream.WriteObjectStart() + stream.WriteObjectField(`count`) + marshalFloat(float64(h.Count), stream) + stream.WriteMore() + stream.WriteObjectField(`sum`) + marshalFloat(float64(h.Sum), stream) + + bucketFound := false + for _, bucket := range h.Buckets { + if bucket.Count == 0 { + continue // No need to expose empty buckets in JSON. + } + stream.WriteMore() + if !bucketFound { + stream.WriteObjectField(`buckets`) + stream.WriteArrayStart() + } + bucketFound = true + marshalHistogramBucket(*bucket, stream) + } + if bucketFound { + stream.WriteArrayEnd() + } + stream.WriteObjectEnd() +} + +func marshalJSONIsEmpty(ptr unsafe.Pointer) bool { return false } @@ -650,7 +892,8 @@ func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) { } var res AlertsResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) { @@ -667,7 +910,8 @@ func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error } var res AlertManagersResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) CleanTombstones(ctx context.Context) error { @@ -696,7 +940,8 @@ func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { } var res ConfigResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error { @@ -707,8 +952,12 @@ func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime, q.Add("match[]", m) } - q.Set("start", formatTime(startTime)) - q.Set("end", formatTime(endTime)) + if !startTime.IsZero() { + q.Set("start", formatTime(startTime)) + } + if !endTime.IsZero() { + q.Set("end", formatTime(endTime)) + } u.RawQuery = q.Encode() @@ -735,7 +984,8 @@ func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) { } var res FlagsResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) Buildinfo(ctx context.Context) (BuildinfoResult, error) { @@ -752,7 +1002,8 @@ func (h *httpAPI) Buildinfo(ctx context.Context) (BuildinfoResult, error) { } var res BuildinfoResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { @@ -769,37 +1020,41 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { } var res RuntimeinfoResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) { u := h.client.URL(epLabels, nil) q := u.Query() - q.Set("start", formatTime(startTime)) - q.Set("end", formatTime(endTime)) + if !startTime.IsZero() { + q.Set("start", formatTime(startTime)) + } + if !endTime.IsZero() { + q.Set("end", formatTime(endTime)) + } for _, m := range matches { q.Add("match[]", m) } - u.RawQuery = q.Encode() - - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, err - } - _, body, w, err := h.client.Do(ctx, req) + _, body, w, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, w, err } var labelNames []string - return 
labelNames, w, json.Unmarshal(body, &labelNames) + err = json.Unmarshal(body, &labelNames) + return labelNames, w, err } func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) q := u.Query() - q.Set("start", formatTime(startTime)) - q.Set("end", formatTime(endTime)) + if !startTime.IsZero() { + q.Set("start", formatTime(startTime)) + } + if !endTime.IsZero() { + q.Set("end", formatTime(endTime)) + } for _, m := range matches { q.Add("match[]", m) } @@ -815,7 +1070,8 @@ func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []strin return nil, w, err } var labelValues model.LabelValues - return labelValues, w, json.Unmarshal(body, &labelValues) + err = json.Unmarshal(body, &labelValues) + return labelValues, w, err } type apiOptions struct { @@ -897,23 +1153,21 @@ func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTi q.Add("match[]", m) } - q.Set("start", formatTime(startTime)) - q.Set("end", formatTime(endTime)) - - u.RawQuery = q.Encode() - - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, err + if !startTime.IsZero() { + q.Set("start", formatTime(startTime)) + } + if !endTime.IsZero() { + q.Set("end", formatTime(endTime)) } - _, body, warnings, err := h.client.Do(ctx, req) + _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err } var mset []model.LabelSet - return mset, warnings, json.Unmarshal(body, &mset) + err = json.Unmarshal(body, &mset) + return mset, warnings, err } func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { @@ -935,7 +1189,8 @@ func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, } var res SnapshotResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) { @@ -952,7 +1207,8 @@ func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) { } var res RulesResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { @@ -969,7 +1225,8 @@ func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) { } var res TargetsResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error) { @@ -993,7 +1250,8 @@ func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget, metric, limi } var res []MetricMetadata - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) { @@ -1016,7 +1274,8 @@ func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[strin } var res map[string][]Metadata - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { @@ -1033,7 +1292,8 @@ func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { } var res TSDBResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) 
WalReplay(ctx context.Context) (WalReplayStatus, error) { @@ -1050,7 +1310,8 @@ func (h *httpAPI) WalReplay(ctx context.Context) (WalReplayStatus, error) { } var res WalReplayStatus - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error) { @@ -1058,22 +1319,21 @@ func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, e q := u.Query() q.Set("query", query) - q.Set("start", formatTime(startTime)) - q.Set("end", formatTime(endTime)) - u.RawQuery = q.Encode() - - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err + if !startTime.IsZero() { + q.Set("start", formatTime(startTime)) + } + if !endTime.IsZero() { + q.Set("end", formatTime(endTime)) } - _, body, _, err := h.client.Do(ctx, req) + _, body, _, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, err } var res []ExemplarQueryResult - return res, json.Unmarshal(body, &res) + err = json.Unmarshal(body, &res) + return res, err } // Warnings is an array of non critical errors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index 246c5ea943..2f5616894e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -28,6 +28,8 @@ var ( MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")} // MetricsGC allows only GC metrics to be collected from Go runtime. // e.g. go_gc_cycles_automatic_gc_cycles_total + // NOTE: This does not include new class of "/cpu/classes/gc/..." metrics. + // Use custom metric rule to access those. MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)} // MetricsMemory allows only memory metrics to be collected from Go runtime. // e.g. go_memory_classes_heap_free_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index a912b75a05..62de4dc59a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -59,6 +59,18 @@ type ExemplarAdder interface { // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts +// CounterVecOpts bundles the options to create a CounterVec metric. +// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type CounterVecOpts struct { + CounterOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to @@ -174,16 +186,24 @@ type CounterVec struct { // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. 
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( + return V2.NewCounterVec(CounterVecOpts{ + CounterOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. +func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 8bc5e44e2f..deedc2dfbe 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -14,20 +14,16 @@ package prometheus import ( - "errors" "fmt" "sort" "strings" "github.com/cespare/xxhash/v2" - - "github.com/prometheus/client_golang/prometheus/internal" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/client_golang/prometheus/internal" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially @@ -54,9 +50,9 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string + // variableLabels contains names of labels and normalization function for + // which the metric maintains variable values. + variableLabels ConstrainedLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -80,10 +76,24 @@ type Desc struct { // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names and normalization functions. Their +// label values are variable and therefore not part of the Desc. (They are managed +// within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. 
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels, + variableLabels: variableLabels.constrainedLabels(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + for _, label := range d.variableLabels { + if !checkLabelName(label.Name) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) return d } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} + labelNames = append(labelNames, "$"+label.Name) + labelNameSet[label.Name] = struct{}{} } if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") + d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 811072cbd5..962608f02c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -37,35 +37,35 @@ // // type metrics struct { // cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec +// hdFailures *prometheus.CounterVec // } // // func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m // } // // func main() { -// // Create a non-global registry. -// reg := prometheus.NewRegistry() +// // Create a non-global registry. +// reg := prometheus.NewRegistry() // -// // Create new metrics and register them using the custom registry. 
-// m := NewMetrics(reg) -// // Set values for the new created metrics. +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. // m.cpuTemp.Set(65.3) // m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index 21271a5bb4..f1ea6c76f7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -55,6 +55,18 @@ type Gauge interface { // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts +// GaugeVecOpts bundles the options to create a GaugeVec metric. +// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type GaugeVecOpts struct { + GaugeOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a @@ -138,16 +150,24 @@ type GaugeVec struct { // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( + return V2.NewGaugeVec(GaugeVecOpts{ + GaugeOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. +func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 3a2d55e84b..2d8d9f64f4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -23,11 +23,10 @@ import ( "strings" "sync" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/client_golang/prometheus/internal" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 4c873a01c3..8d818afe90 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,10 +22,9 @@ import ( "sync/atomic" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "google.golang.org/protobuf/proto" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -402,7 +401,7 @@ type HistogramOpts struct { // Histogram by a Prometheus server with that feature enabled (requires // Prometheus v2.40+). Sparse buckets are exponential buckets covering // the whole float64 range (with the exception of the “zero†bucket, see - // SparseBucketsZeroThreshold below). From any one bucket to the next, + // NativeHistogramZeroThreshold below). From any one bucket to the next, // the width of the bucket grows by a constant // factor. NativeHistogramBucketFactor provides an upper bound for this // factor (exception see below). The smaller @@ -433,7 +432,7 @@ type HistogramOpts struct { // bucket. For best results, this should be close to a bucket // boundary. This is usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // DefNativeHistogramZeroThreshold is used as the threshold. To configure // a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero @@ -469,6 +468,18 @@ type HistogramOpts struct { NativeHistogramMaxZeroThreshold float64 } +// HistogramVecOpts bundles the options to create a HistogramVec metric. +// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type HistogramVecOpts struct { + HistogramOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. 
// @@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram { func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == bucketLabel { + if n.Name == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } - h.counts[1] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } + h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema) + h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))} + atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -632,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if frac == 0.5 { key-- } - div := 1 << -schema - key = (key + div - 1) / div + offset := (1 << -schema) - 1 + key = (key + offset) >> -schema } if isInf { key++ @@ -810,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) { } } -// limitSparsebuckets applies a strategy to limit the number of populated sparse +// limitBuckets applies a strategy to limit the number of populated sparse // buckets. It's generally best effort, and there are situations where the // number can go higher (if even the lowest resolution isn't enough to reduce // the number sufficiently, or if the provided counts aren't fully updated yet @@ -1034,15 +1041,23 @@ type HistogramVec struct { // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( + return V2.NewHistogramVec(HistogramVecOpts{ + HistogramOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. +func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) + return newHistogram(desc, opts.HistogramOpts, lvs...) 
}), } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index c1b8fad36a..63ff8683ce 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,6 +32,78 @@ import ( // create a Desc. type Labels map[string]string +// ConstrainedLabels represents a label name and its constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabel struct { + Name string + Constraint func(string) string +} + +func (cl ConstrainedLabel) Constrain(v string) string { + if cl.Constraint == nil { + return v + } + return cl.Constraint(v) +} + +// ConstrainableLabels is an interface that allows creating of labels that can +// be optionally constrained. +// +// prometheus.V2().NewCounterVec(CounterVecOpts{ +// CounterOpts: {...}, // Usual CounterOpts fields +// VariableLabels: []ConstrainedLabels{ +// {Name: "A"}, +// {Name: "B", Constraint: func(v string) string { ... }}, +// }, +// }) +type ConstrainableLabels interface { + constrainedLabels() ConstrainedLabels + labelNames() []string +} + +// ConstrainedLabels represents a collection of label name -> constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabels []ConstrainedLabel + +func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { + return cls +} + +func (cls ConstrainedLabels) labelNames() []string { + names := make([]string, len(cls)) + for i, label := range cls { + names[i] = label.Name + } + return names +} + +// UnconstrainedLabels represents collection of label without any constraint on +// their value. Thus, it is simply a collection of label names. +// +// UnconstrainedLabels([]string{ "A", "B" }) +// +// is equivalent to +// +// ConstrainedLabels { +// { Name: "A" }, +// { Name: "B" }, +// } +type UnconstrainedLabels []string + +func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { + constrainedLabels := make([]ConstrainedLabel, len(uls)) + for i, l := range uls { + constrainedLabels[i] = ConstrainedLabel{Name: l} + } + return constrainedLabels +} + +func (uls UnconstrainedLabels) labelNames() []string { + return uls +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index b5119c5041..07bbc9d768 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -20,11 +20,9 @@ import ( "strings" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. 
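Editor's note, not part of the patch: the hunks above pull in client_golang's experimental `V2` constructors and the `ConstrainableLabels`/`ConstrainedLabels` types, which let a metric vector normalize label values before they are hashed or stored. A minimal sketch of how that API can be exercised is below; the metric name, label name, and `strings.ToLower` constraint are illustrative assumptions, not anything this patch registers.

```go
package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// Assumed example metric: the Constraint function normalizes the "method"
	// label value, so "GET" and "get" resolve to the same child series.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "app_http_requests_total",
			Help: "HTTP requests, partitioned by normalized method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
		},
	})
	reg.MustRegister(requests)

	requests.WithLabelValues("GET").Inc()
	requests.WithLabelValues("get").Inc() // Same series as above; counter is now 2.
}
```

The classic `NewCounterVec(opts, []string{...})` path keeps working unchanged; per the diff it now simply wraps the labels in `UnconstrainedLabels` and delegates to `V2.NewCounterVec`.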
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go index 8031e87042..fa90115921 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go @@ -28,30 +28,30 @@ // package main // // import ( -// "math/rand" -// "net/http" +// "math/rand" +// "net/http" // -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promauto" +// "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), +// Name: "random_numbers", +// Help: "A histogram of normally distributed random numbers.", +// Buckets: prometheus.LinearBuckets(-3, .1, 61), // }) // // func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } +// for { +// histogram.Observe(rand.NormFloat64()) +// } // } // // func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) +// go Random() +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":1971", nil) // } // // Prometheus's version of a minimal hello-world program: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index a4cc9810b0..09b8d2fbea 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -37,6 +37,7 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" "sync" "time" @@ -47,9 +48,10 @@ import ( ) const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" + contentTypeHeader = "Content-Type" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" + processStartTimeHeader = "Process-Start-Time-Unix" ) var gzipPool = sync.Pool{ @@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if !opts.ProcessStartTime.IsZero() { + rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) + } if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. @@ -366,6 +371,14 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // ProcessStartTime allows setting process start timevalue that will be exposed + // with "Process-Start-Time-Unix" response header along with the metrics + // payload. This allow callers to have efficient transformations to cumulative + // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per + // scrape target. + // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus + // exposition format. + ProcessStartTime time.Time } // gzipAccepted returns whether the client will accept gzip-encoded content. 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 2108678162..d3482c40ca 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou o.apply(rtOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - addWithExemplar( - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - 1, - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context())) } return resp, err } @@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT o.apply(rtOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels())) return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - observeWithExemplar( - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - time.Since(start).Seconds(), - rtOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...) + for label, resolve := range rtOpts.extraLabelsFromCtx { + l[label] = resolve(resp.Request.Context()) + } + observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context())) } return resp, err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index cca67a78a9..3793036ad0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { @@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) 
+ for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) } } @@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(counter) + // Curry the counter with dynamic labels before checking the remaining labels. + code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels())) if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context())) } } @@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - observeWithExemplar( - obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) + l := labels(code, method, r.Method, status, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) }) next.ServeHTTP(d, r) } @@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. 
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + if code { return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) } } @@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler o.apply(hOpts) } - code, method := checkLabels(obs) + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(d.Written()), - hOpts.getExemplarFn(r.Context()), - ) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index c590d912c9..5d4383aa14 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -24,14 +24,32 @@ type Option interface { apply(*options) } +// LabelValueFromCtx are used to compute the label value from request context. +// Context can be filled with values from request through middleware. +type LabelValueFromCtx func(ctx context.Context) string + // options store options for both a handler or round tripper. 
type options struct { - extraMethods []string - getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels + extraLabelsFromCtx map[string]LabelValueFromCtx } func defaultOptions() *options { - return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} + return &options{ + getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }, + extraLabelsFromCtx: map[string]LabelValueFromCtx{}, + } +} + +func (o *options) emptyDynamicLabels() prometheus.Labels { + labels := prometheus.Labels{} + + for label := range o.extraLabelsFromCtx { + labels[label] = "" + } + + return labels } type optionApplyFunc func(*options) @@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option { }) } -// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. -// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric -// will get instrumented without exemplar. +// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics. +// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but +// metric will continue to observe/increment. func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { return optionApplyFunc(func(o *options) { o.getExemplarFn = getExemplarFn }) } + +// WithLabelFromCtx registers a label for dynamic resolution with access to context. +// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage +func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option { + return optionApplyFunc(func(o *options) { + o.extraLabelsFromCtx[name] = valueFn + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 09e34d307c..44da9433be 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -21,18 +21,17 @@ import ( "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "unicode/utf8" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/prometheus/internal" + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" ) const ( @@ -933,6 +932,10 @@ func checkMetricConsistency( h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } + if dtoMetric.TimestampMs != nil { + h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) + h.Write(separatorByteSlice) + } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( @@ -962,7 +965,7 @@ func checkDescConsistency( copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), + Name: proto.String(l.Name), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 7bc448a893..dd359264e5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -22,11 +22,10 @@ import ( "sync/atomic" "time" - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "github.com/beorn7/perks/quantile" + "google.golang.org/protobuf/proto" ) // quantileLabel is used for the label that defines the quantile in a @@ -148,6 +147,18 @@ type SummaryOpts struct { BufCap uint32 } +// SummaryVecOpts bundles the options to create a SummaryVec metric. +// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type SummaryVecOpts struct { + SummaryOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Contraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging @@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) } for _, n := range desc.variableLabels { - if n == quantileLabel { + if n.Name == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -530,20 +541,28 @@ type SummaryVec struct { // it is handled by the Prometheus server internally, “quantile†is an illegal // label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { + return V2.NewSummaryVec(SummaryVecOpts{ + SummaryOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. 
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { + for _, ln := range opts.VariableLabels.labelNames() { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } - desc := NewDesc( + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) + return newSummary(desc, opts.SummaryOpts, lvs...) }), } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go index f28a76f3a6..52344fef53 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -23,7 +23,9 @@ type Timer struct { } // NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the +// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar +// later on will be also supported. +// Timer is usually used to time a function call in the // following way: // // func TimeMe() { @@ -31,6 +33,14 @@ type Timer struct { // defer timer.ObserveDuration() // // Do actual work. // } +// +// or +// +// func TimeMeWithExemplar() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDurationWithExemplar(exemplar) +// // Do actual work. +// } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), @@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration { } return d } + +// ObserveDurationWithExemplar is like ObserveDuration, but it will also +// observe exemplar with the duration unless exemplar is nil or provided Observer can't +// be casted to ExemplarObserver. +func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration { + d := time.Since(t.begin) + eo, ok := t.observer.(ExemplarObserver) + if ok && exemplar != nil { + eo.ObserveWithExemplar(d.Seconds(), exemplar) + return d + } + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 2d3abc1cbd..5f6bb80014 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,13 +19,11 @@ import ( "time" "unicode/utf8" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // ValueType is an enumeration of metric types that represent a simple value. 
@@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { + for i, l := range desc.variableLabels { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), + Name: proto.String(l.Name), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 7ae322590c..f0d0015a0f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,6 +20,24 @@ import ( "github.com/prometheus/common/model" ) +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} + +func getLabelsFromPool() Labels { + return labelsPool.Get().(Labels) +} + +func putLabelsToPool(labels Labels) { + for k := range labels { + delete(labels, k) + } + + labelsPool.Put(labels) +} + // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -72,6 +90,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -91,6 +110,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { + labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + h, err := m.hashLabels(labels) if err != nil { return false @@ -106,6 +128,9 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { + labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + return m.metricMap.deleteByLabels(labels, m.curry) } @@ -145,10 +170,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label) + return nil, fmt.Errorf("label name %q is already curried", label.Name) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -156,7 +181,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, val}) + newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -199,6 +224,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. 
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -224,6 +250,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + labels = constrainLabels(m.desc, labels) + defer putLabelsToPool(labels) + h, err := m.hashLabels(labels) if err != nil { return nil, err @@ -266,16 +295,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { iCurry int ) for i, label := range m.desc.variableLabels { - val, ok := labels[label] + val, ok := labels[label.Name] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label) + return 0, fmt.Errorf("label name %q is already curried", label.Name) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) + return 0, fmt.Errorf("label name %q missing in label map", label.Name) } h = m.hashAdd(h, val) } @@ -453,7 +482,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
@@ -605,7 +634,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k] { + if values[i] != labels[k.Name] { return false } } @@ -621,7 +650,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) [] iCurry++ continue } - labelValues[i] = labels[k] + labelValues[i] = labels[k.Name] } return labelValues } @@ -640,3 +669,35 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } return labelValues } + +func constrainLabels(desc *Desc, labels Labels) Labels { + constrainedLabels := getLabelsFromPool() + for l, v := range labels { + if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { + v = desc.variableLabels[i].Constrain(v) + } + + constrainedLabels[l] = v + } + + return constrainedLabels +} + +func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + constrainedValues := make([]string, len(lvs)) + var iCurry, iLVs int + for i := 0; i < len(lvs)+len(curry); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + iCurry++ + continue + } + + if i < len(desc.variableLabels) { + constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + } else { + constrainedValues[iLVs] = lvs[iLVs] + } + iLVs++ + } + return constrainedValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go new file mode 100644 index 0000000000..42bc3a8f06 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go @@ -0,0 +1,23 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +type v2 struct{} + +// V2 is a struct that can be referenced to access experimental API that might +// be present in v2 of client golang someday. It offers extended functionality +// of v1 with slightly changed API. It is acceptable to use some pieces from v1 +// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc` +// in the same codebase. +var V2 = v2{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 1498ee144c..25da157f15 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,12 +17,10 @@ import ( "fmt" "sort" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "google.golang.org/protobuf/proto" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { constLabels[ln] = lv } // NewDesc will do remaining validations. 
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) // Propagate errors if there was any. This will override any errer // created by NewDesc above, i.e. earlier errors get precedence. if desc.err != nil { diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d6..f4fc884552 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. + for key, fam := range d.fams { + *v = *fam + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index f819e4f8b5..dfac962a4e 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -21,8 +21,8 @@ import "bytes" // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus. func Fuzz(in []byte) int { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 9d94ae9eff..21cdddcf05 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -46,20 +46,20 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE` and `# HELP` +// line. 
A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - No support for the following (optional) features: `# UNIT` line, `_created` +// line, info type, stateset type, gaugehistogram type. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { name := in.GetName() if name == "" { diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643ec..ac2482782c 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c7..a21b9d15dd 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. 
nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ package goautoneg diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8aa8c..5727452c1e 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). 
- if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a28..9eb440413f 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. 
The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. 
type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. 
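The value.go hunks above split the float-sample code out into value_float.go and teach model.Sample to carry either a classic float value or a native histogram, switching its JSON shape on whether Histogram is set. A minimal sketch of the resulting round-trip, assuming only the vendored github.com/prometheus/common/model version introduced by this patch; the metric name, timestamp, and bucket values below are illustrative, not taken from the patch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// A plain float sample still marshals as {"metric":...,"value":[<ts>,"42"]}.
	s := model.Sample{
		Metric:    model.Metric{"__name__": "http_requests_total"},
		Value:     42,
		Timestamp: model.Time(1700000000000),
	}
	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// Setting Histogram switches the same type to the
	// {"metric":...,"histogram":[<ts>,{...}]} shape; Value is then ignored,
	// matching the MarshalJSON hunk above.
	s.Histogram = &model.SampleHistogram{
		Count: 10,
		Sum:   99.5,
		Buckets: model.HistogramBuckets{
			{Boundaries: 0, Lower: 0.5, Upper: 1, Count: 10}, // Boundaries 0 => (lower, upper]
		},
	}
	b, err = json.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// UnmarshalJSON picks whichever of "value"/"histogram" is present.
	var back model.Sample
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Histogram != nil) // true
}
```

Keeping Value and Histogram on the same struct lets existing callers continue decoding float samples unchanged, while histogram-aware callers only need to check Histogram != nil before falling back to Value.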
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 0000000000..0f615a7053 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. 
+func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 0000000000..54bb038cff --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count 
FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. + Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 0000000000..726c50ee63 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/redis/go-redis/v9/.gitignore similarity index 52% rename from vendor/github.com/go-redis/redis/v8/.gitignore rename to vendor/github.com/redis/go-redis/v9/.gitignore index b975a7b4c3..dc322f9be9 100644 --- a/vendor/github.com/go-redis/redis/v8/.gitignore +++ b/vendor/github.com/redis/go-redis/v9/.gitignore @@ -1,3 +1,3 @@ *.rdb -testdata/*/ +testdata/* .idea/ diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml similarity index 100% rename from vendor/github.com/go-redis/redis/v8/.golangci.yml rename to vendor/github.com/redis/go-redis/v9/.golangci.yml diff --git a/vendor/github.com/redis/go-redis/v9/.prettierrc.yml b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml new file mode 100644 index 0000000000..8b7f044ad1 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/.prettierrc.yml @@ -0,0 +1,4 @@ +semi: false +singleQuote: true +proseWrap: always +printWidth: 100 diff --git a/vendor/github.com/redis/go-redis/v9/CHANGELOG.md b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md new file mode 100644 index 0000000000..297438a9fc --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/CHANGELOG.md @@ -0,0 +1,124 @@ +## [9.0.5](https://github.com/redis/go-redis/compare/v9.0.4...v9.0.5) (2023-05-29) + + +### Features + +* Add ACL LOG ([#2536](https://github.com/redis/go-redis/issues/2536)) ([31ba855](https://github.com/redis/go-redis/commit/31ba855ddebc38fbcc69a75d9d4fb769417cf602)) +* add field protocol to setupClusterQueryParams ([#2600](https://github.com/redis/go-redis/issues/2600)) ([840c25c](https://github.com/redis/go-redis/commit/840c25cb6f320501886a82a5e75f47b491e46fbe)) +* add protocol option ([#2598](https://github.com/redis/go-redis/issues/2598)) ([3917988](https://github.com/redis/go-redis/commit/391798880cfb915c4660f6c3ba63e0c1a459e2af)) + + + +## [9.0.4](https://github.com/redis/go-redis/compare/v9.0.3...v9.0.4) (2023-05-01) + + +### Bug Fixes + +* reader float parser ([#2513](https://github.com/redis/go-redis/issues/2513)) ([46f2450](https://github.com/redis/go-redis/commit/46f245075e6e3a8bd8471f9ca67ea95fd675e241)) + + +### Features + +* add client info command ([#2483](https://github.com/redis/go-redis/issues/2483)) ([b8c7317](https://github.com/redis/go-redis/commit/b8c7317cc6af444603731f7017c602347c0ba61e)) +* no longer verify HELLO error messages ([#2515](https://github.com/redis/go-redis/issues/2515)) ([7b4f217](https://github.com/redis/go-redis/commit/7b4f2179cb5dba3d3c6b0c6f10db52b837c912c8)) +* read the structure to increase the judgment of the omitempty op… ([#2529](https://github.com/redis/go-redis/issues/2529)) 
([37c057b](https://github.com/redis/go-redis/commit/37c057b8e597c5e8a0e372337f6a8ad27f6030af)) + + + +## [9.0.3](https://github.com/redis/go-redis/compare/v9.0.2...v9.0.3) (2023-04-02) + +### New Features + +- feat(scan): scan time.Time sets the default decoding (#2413) +- Add support for CLUSTER LINKS command (#2504) +- Add support for acl dryrun command (#2502) +- Add support for COMMAND GETKEYS & COMMAND GETKEYSANDFLAGS (#2500) +- Add support for LCS Command (#2480) +- Add support for BZMPOP (#2456) +- Adding support for ZMPOP command (#2408) +- Add support for LMPOP (#2440) +- feat: remove pool unused fields (#2438) +- Expiretime and PExpireTime (#2426) +- Implement `FUNCTION` group of commands (#2475) +- feat(zadd): add ZAddLT and ZAddGT (#2429) +- Add: Support for COMMAND LIST command (#2491) +- Add support for BLMPOP (#2442) +- feat: check pipeline.Do to prevent confusion with Exec (#2517) +- Function stats, function kill, fcall and fcall_ro (#2486) +- feat: Add support for CLUSTER SHARDS command (#2507) +- feat(cmd): support for adding byte,bit parameters to the bitpos command (#2498) + +### Fixed + +- fix: eval api cmd.SetFirstKeyPos (#2501) +- fix: limit the number of connections created (#2441) +- fixed #2462 v9 continue support dragonfly, it's Hello command return "NOAUTH Authentication required" error (#2479) +- Fix for internal/hscan/structmap.go:89:23: undefined: reflect.Pointer (#2458) +- fix: group lag can be null (#2448) + +### Maintenance + +- Updating to the latest version of redis (#2508) +- Allowing for running tests on a port other than the fixed 6380 (#2466) +- redis 7.0.8 in tests (#2450) +- docs: Update redisotel example for v9 (#2425) +- chore: update go mod, Upgrade golang.org/x/net version to 0.7.0 (#2476) +- chore: add Chinese translation (#2436) +- chore(deps): bump github.com/bsm/gomega from 1.20.0 to 1.26.0 (#2421) +- chore(deps): bump github.com/bsm/ginkgo/v2 from 2.5.0 to 2.7.0 (#2420) +- chore(deps): bump actions/setup-go from 3 to 4 (#2495) +- docs: add instructions for the HSet api (#2503) +- docs: add reading lag field comment (#2451) +- test: update go mod before testing(go mod tidy) (#2423) +- docs: fix comment typo (#2505) +- test: remove testify (#2463) +- refactor: change ListElementCmd to KeyValuesCmd. (#2443) +- fix(appendArg): appendArg case special type (#2489) + +## [9.0.2](https://github.com/redis/go-redis/compare/v9.0.1...v9.0.2) (2023-02-01) + +### Features + +* upgrade OpenTelemetry, use the new metrics API. ([#2410](https://github.com/redis/go-redis/issues/2410)) ([e29e42c](https://github.com/redis/go-redis/commit/e29e42cde2755ab910d04185025dc43ce6f59c65)) + +## v9 2023-01-30 + +### Breaking + +- Changed Pipelines to not be thread-safe any more. + +### Added + +- Added support for [RESP3](https://github.com/antirez/RESP3/blob/master/spec.md) protocol. It was + contributed by @monkey92t who has done the majority of work in this release. +- Added `ContextTimeoutEnabled` option that controls whether the client respects context timeouts + and deadlines. See + [Redis Timeouts](https://redis.uptrace.dev/guide/go-redis-debugging.html#timeouts) for details. +- Added `ParseClusterURL` to parse URLs into `ClusterOptions`, for example, + `redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791`. +- Added metrics instrumentation using `redisotel.IstrumentMetrics`. 
See + [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html) +- Added `redis.HasErrorPrefix` to help working with errors. + +### Changed + +- Removed asynchronous cancellation based on the context timeout. It was racy in v8 and is + completely gone in v9. +- Reworked hook interface and added `DialHook`. +- Replaced `redisotel.NewTracingHook` with `redisotel.InstrumentTracing`. See + [example](example/otel) and + [documentation](https://redis.uptrace.dev/guide/go-redis-monitoring.html). +- Replaced `*redis.Z` with `redis.Z` since it is small enough to be passed as value without making + an allocation. +- Renamed the option `MaxConnAge` to `ConnMaxLifetime`. +- Renamed the option `IdleTimeout` to `ConnMaxIdleTime`. +- Removed connection reaper in favor of `MaxIdleConns`. +- Removed `WithContext` since `context.Context` can be passed directly as an arg. +- Removed `Pipeline.Close` since there is no real need to explicitly manage pipeline resources and + it can be safely reused via `sync.Pool` etc. `Pipeline.Discard` is still available if you want to + reset commands for some reason. + +### Fixed + +- Improved and fixed pipeline retries. +- As usually, added support for more commands and fixed some bugs. diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/redis/go-redis/v9/LICENSE similarity index 95% rename from vendor/github.com/go-redis/redis/v8/LICENSE rename to vendor/github.com/redis/go-redis/v9/LICENSE index 298bed9bea..f4967dbc5c 100644 --- a/vendor/github.com/go-redis/redis/v8/LICENSE +++ b/vendor/github.com/redis/go-redis/v9/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013 The github.com/go-redis/redis Authors. +Copyright (c) 2013 The github.com/redis/go-redis Authors. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile new file mode 100644 index 0000000000..285f65dd55 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/Makefile @@ -0,0 +1,41 @@ +GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) + +test: testdeps + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go test in $${dir}"; \ + (cd "$${dir}" && \ + go mod tidy -compat=1.18 && \ + go test && \ + go test ./... -short -race && \ + go test ./... -run=NONE -bench=. -benchmem && \ + env GOOS=linux GOARCH=386 go test && \ + go vet); \ + done + cd internal/customvet && go build . + go vet -vettool ./internal/customvet/customvet + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://download.redis.io/releases/redis-7.2-rc1.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + cd $< && make all + +fmt: + gofmt -w -s ./ + goimports -w -local github.com/redis/go-redis ./ + +go_mod_tidy: + set -e; for dir in $(GO_MOD_DIRS); do \ + echo "go mod tidy in $${dir}"; \ + (cd "$${dir}" && \ + go get -u ./... 
&& \ + go mod tidy -compat=1.18); \ + done diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/redis/go-redis/v9/README.md similarity index 55% rename from vendor/github.com/go-redis/redis/v8/README.md rename to vendor/github.com/redis/go-redis/v9/README.md index f3b6a018cb..36d60fd4e4 100644 --- a/vendor/github.com/go-redis/redis/v8/README.md +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -1,25 +1,29 @@ # Redis client for Go -![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) +[![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc) [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) +[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) -go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). -Uptrace is an open source and blazingly fast **distributed tracing** backend powered by -OpenTelemetry and ClickHouse. Give it a star as well! +> go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an open-source APM tool that supports distributed tracing, metrics, and logs. You can +> use it to monitor applications and set up automatic alerts to receive notifications via email, +> Slack, Telegram, and others. +> +> See [OpenTelemetry](example/otel) example which demonstrates how you can use Uptrace to monitor +> go-redis. -## Resources +## Documentation -- [Discussions](https://github.com/go-redis/redis/discussions) -- [Documentation](https://redis.uptrace.dev) -- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) -- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) -- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) +- [English](https://redis.uptrace.dev) +- [简体中文](https://redis.uptrace.dev/zh/) -Other projects you may like: +## Resources -- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. -- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. +- [Discussions](https://github.com/redis/go-redis/discussions) +- [Chat](https://discord.gg/rWtp5Aj) +- [Reference](https://pkg.go.dev/github.com/redis/go-redis/v9) +- [Examples](https://pkg.go.dev/github.com/redis/go-redis/v9#pkg-examples) ## Ecosystem @@ -28,23 +32,20 @@ Other projects you may like: - [Redis Cache](https://github.com/go-redis/cache) - [Rate limiting](https://github.com/go-redis/redis_rate) +This client also works with [Kvrocks](https://github.com/apache/incubator-kvrocks), a distributed +key value NoSQL database that uses RocksDB as storage engine and is compatible with Redis protocol. + ## Features - Redis 3 commands except QUIT, MONITOR, and SYNC. - Automatic connection pooling with - [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. -- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). -- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). 
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and - [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline). -- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). -- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). -- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). -- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). -- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup) - without using cluster mode and Redis Sentinel. -- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). -- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation). +- [Pub/Sub](https://redis.uptrace.dev/guide/go-redis-pubsub.html). +- [Pipelines and transactions](https://redis.uptrace.dev/guide/go-redis-pipelines.html). +- [Scripting](https://redis.uptrace.dev/guide/lua-scripting.html). +- [Redis Sentinel](https://redis.uptrace.dev/guide/go-redis-sentinel.html). +- [Redis Cluster](https://redis.uptrace.dev/guide/go-redis-cluster.html). +- [Redis Ring](https://redis.uptrace.dev/guide/ring.html). +- [Redis Performance Monitoring](https://redis.uptrace.dev/guide/redis-performance-monitoring.html). ## Installation @@ -56,10 +57,10 @@ module: go mod init github.com/my/repo ``` -And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake): +Then install go-redis/**v9**: ```shell -go get github.com/go-redis/redis/v8 +go get github.com/redis/go-redis/v9 ``` ## Quickstart @@ -67,7 +68,7 @@ go get github.com/go-redis/redis/v8 ```go import ( "context" - "github.com/go-redis/redis/v8" + "github.com/redis/go-redis/v9" "fmt" ) @@ -145,7 +146,7 @@ go-redis will start a redis-server and run the test cases. The paths of redis-server bin file and redis config file are defined in `main_test.go`: -``` +```go var ( redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) @@ -155,21 +156,34 @@ var ( For local testing, you can change the variables to refer to your local files, or create a soft link to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: -``` +```shell ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ ``` Lastly, run: -``` +```shell go test ``` +Another option is to run your specific tests with an already running redis. The example below, tests against a redis running on port 9999.: + +```shell +REDIS_PORT=9999 go test +``` + +## See also + +- [Golang ORM](https://bun.uptrace.dev) for PostgreSQL, MySQL, MSSQL, and SQLite +- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/) +- [Golang HTTP router](https://bunrouter.uptrace.dev/) +- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse) + ## Contributors Thanks to all the people who already contributed! 
- - + + diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/redis/go-redis/v9/RELEASING.md similarity index 100% rename from vendor/github.com/go-redis/redis/v8/RELEASING.md rename to vendor/github.com/redis/go-redis/v9/RELEASING.md diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/redis/go-redis/v9/cluster.go similarity index 74% rename from vendor/github.com/go-redis/redis/v8/cluster.go rename to vendor/github.com/redis/go-redis/v9/cluster.go index a54f2f37ed..941838dd03 100644 --- a/vendor/github.com/go-redis/redis/v8/cluster.go +++ b/vendor/github.com/redis/go-redis/v9/cluster.go @@ -6,17 +6,19 @@ import ( "fmt" "math" "net" + "net/url" "runtime" "sort" + "strings" "sync" "sync/atomic" "time" - "github.com/go-redis/redis/v8/internal" - "github.com/go-redis/redis/v8/internal/hashtag" - "github.com/go-redis/redis/v8/internal/pool" - "github.com/go-redis/redis/v8/internal/proto" - "github.com/go-redis/redis/v8/internal/rand" + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/hashtag" + "github.com/redis/go-redis/v9/internal/pool" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/rand" ) var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") @@ -27,6 +29,9 @@ type ClusterOptions struct { // A seed list of host:port addresses of cluster nodes. Addrs []string + // ClientName will execute the `CLIENT SETNAME ClientName` command for each conn. + ClientName string + // NewClient creates a cluster node client with provided name and options. NewClient func(opt *Options) *Client @@ -57,6 +62,7 @@ type ClusterOptions struct { OnConnect func(ctx context.Context, cn *Conn) error + Protocol int Username string Password string @@ -64,20 +70,18 @@ type ClusterOptions struct { MinRetryBackoff time.Duration MaxRetryBackoff time.Duration - DialTimeout time.Duration - ReadTimeout time.Duration - WriteTimeout time.Duration - - // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). - PoolFIFO bool + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + ContextTimeoutEnabled bool - // PoolSize applies per cluster node and not for the whole cluster. - PoolSize int - MinIdleConns int - MaxConnAge time.Duration - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration + PoolFIFO bool + PoolSize int // applies per cluster node and not for the whole cluster + PoolTimeout time.Duration + MinIdleConns int + MaxIdleConns int + ConnMaxIdleTime time.Duration + ConnMaxLifetime time.Duration TLSConfig *tls.Config } @@ -131,13 +135,137 @@ func (opt *ClusterOptions) init() { } } -func (opt *ClusterOptions) clientOptions() *Options { - const disableIdleCheck = -1 +// ParseClusterURL parses a URL into ClusterOptions that can be used to connect to Redis. +// The URL must be in the form: +// +// redis://:@: +// or +// rediss://:@: +// +// To add additional addresses, specify the query parameter, "addr" one or more times. e.g: +// +// redis://:@:?addr=:&addr=: +// or +// rediss://:@:?addr=:&addr=: +// +// Most Option fields can be set using query parameters, with the following restrictions: +// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries +// - only scalar type fields are supported (bool, int, time.Duration) +// - for time.Duration fields, values must be a valid input for time.ParseDuration(); +// additionally a plain integer as value (i.e. 
without unit) is intepreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "username" and "password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// +// Example: +// +// redis://user:password@localhost:6789?dial_timeout=3&read_timeout=6s&addr=localhost:6790&addr=localhost:6791 +// is equivalent to: +// &ClusterOptions{ +// Addr: ["localhost:6789", "localhost:6790", "localhost:6791"] +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// } +func ParseClusterURL(redisURL string) (*ClusterOptions, error) { + o := &ClusterOptions{} + + u, err := url.Parse(redisURL) + if err != nil { + return nil, err + } + + // add base URL to the array of addresses + // more addresses may be added through the URL params + h, p := getHostPortWithDefaults(u) + o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) + + // setup username, password, and other configurations + o, err = setupClusterConn(u, h, o) + if err != nil { + return nil, err + } + + return o, nil +} +// setupClusterConn gets the username and password from the URL and the query parameters. +func setupClusterConn(u *url.URL, host string, o *ClusterOptions) (*ClusterOptions, error) { + switch u.Scheme { + case "rediss": + o.TLSConfig = &tls.Config{ServerName: host} + fallthrough + case "redis": + o.Username, o.Password = getUserPassword(u) + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } + + // retrieve the configuration from the query parameters + o, err := setupClusterQueryParams(u, o) + if err != nil { + return nil, err + } + + return o, nil +} + +// setupClusterQueryParams converts query parameters in u to option value in o. +func setupClusterQueryParams(u *url.URL, o *ClusterOptions) (*ClusterOptions, error) { + q := queryOptions{q: u.Query()} + + o.Protocol = q.int("protocol") + o.ClientName = q.string("client_name") + o.MaxRedirects = q.int("max_redirects") + o.ReadOnly = q.bool("read_only") + o.RouteByLatency = q.bool("route_by_latency") + o.RouteRandomly = q.bool("route_randomly") + o.MaxRetries = q.int("max_retries") + o.MinRetryBackoff = q.duration("min_retry_backoff") + o.MaxRetryBackoff = q.duration("max_retry_backoff") + o.DialTimeout = q.duration("dial_timeout") + o.ReadTimeout = q.duration("read_timeout") + o.WriteTimeout = q.duration("write_timeout") + o.PoolFIFO = q.bool("pool_fifo") + o.PoolSize = q.int("pool_size") + o.MinIdleConns = q.int("min_idle_conns") + o.PoolTimeout = q.duration("pool_timeout") + o.ConnMaxLifetime = q.duration("conn_max_lifetime") + o.ConnMaxIdleTime = q.duration("conn_max_idle_time") + + if q.err != nil { + return nil, q.err + } + + // addr can be specified as many times as needed + addrs := q.strings("addr") + for _, addr := range addrs { + h, p, err := net.SplitHostPort(addr) + if err != nil || h == "" || p == "" { + return nil, fmt.Errorf("redis: unable to parse addr param: %s", addr) + } + + o.Addrs = append(o.Addrs, net.JoinHostPort(h, p)) + } + + // any parameters left? 
+ if r := q.remaining(); len(r) > 0 { + return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", ")) + } + + return o, nil +} + +func (opt *ClusterOptions) clientOptions() *Options { return &Options{ - Dialer: opt.Dialer, - OnConnect: opt.OnConnect, + ClientName: opt.ClientName, + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + Protocol: opt.Protocol, Username: opt.Username, Password: opt.Password, @@ -149,13 +277,13 @@ func (opt *ClusterOptions) clientOptions() *Options { ReadTimeout: opt.ReadTimeout, WriteTimeout: opt.WriteTimeout, - PoolFIFO: opt.PoolFIFO, - PoolSize: opt.PoolSize, - MinIdleConns: opt.MinIdleConns, - MaxConnAge: opt.MaxConnAge, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: disableIdleCheck, + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + MinIdleConns: opt.MinIdleConns, + MaxIdleConns: opt.MaxIdleConns, + ConnMaxIdleTime: opt.ConnMaxIdleTime, + ConnMaxLifetime: opt.ConnMaxLifetime, TLSConfig: opt.TLSConfig, // If ClusterSlots is populated, then we probably have an artificial @@ -204,15 +332,26 @@ func (n *clusterNode) updateLatency() { const numProbe = 10 var dur uint64 + successes := 0 for i := 0; i < numProbe; i++ { time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) start := time.Now() - n.Client.Ping(context.TODO()) - dur += uint64(time.Since(start) / time.Microsecond) + err := n.Client.Ping(context.TODO()).Err() + if err == nil { + dur += uint64(time.Since(start) / time.Microsecond) + successes++ + } } - latency := float64(dur) / float64(numProbe) + var latency float64 + if successes == 0 { + // If none of the pings worked, set latency to some arbitrarily high value so this node gets + // least priority. + latency = float64((1 * time.Minute) / time.Microsecond) + } else { + latency = float64(dur) / float64(successes) + } atomic.StoreUint32(&n.latency, uint32(latency+0.5)) } @@ -262,6 +401,7 @@ type clusterNodes struct { nodes map[string]*clusterNode activeAddrs []string closed bool + onNewNode []func(rdb *Client) _generation uint32 // atomic } @@ -297,6 +437,12 @@ func (c *clusterNodes) Close() error { return firstErr } +func (c *clusterNodes) OnNewNode(fn func(rdb *Client)) { + c.mu.Lock() + c.onNewNode = append(c.onNewNode, fn) + c.mu.Unlock() +} + func (c *clusterNodes) Addrs() ([]string, error) { var addrs []string @@ -374,6 +520,9 @@ func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { } node = newClusterNode(c.opt, addr) + for _, fn := range c.onNewNode { + fn(node.Client) + } c.addrs = appendIfNotExists(c.addrs, addr) c.nodes[addr] = node @@ -683,21 +832,16 @@ func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, er //------------------------------------------------------------------------------ -type clusterClient struct { - opt *ClusterOptions - nodes *clusterNodes - state *clusterStateHolder //nolint:structcheck - cmdsInfoCache *cmdsInfoCache //nolint:structcheck -} - // ClusterClient is a Redis Cluster client representing a pool of zero // or more underlying connections. It's safe for concurrent use by // multiple goroutines. 
type ClusterClient struct { - *clusterClient + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache cmdable - hooks - ctx context.Context + hooksMixin } // NewClusterClient returns a Redis Cluster client as described in @@ -706,38 +850,24 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient { opt.init() c := &ClusterClient{ - clusterClient: &clusterClient{ - opt: opt, - nodes: newClusterNodes(opt), - }, - ctx: context.Background(), + opt: opt, + nodes: newClusterNodes(opt), } + c.state = newClusterStateHolder(c.loadState) c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) c.cmdable = c.Process - if opt.IdleCheckFrequency > 0 { - go c.reaper(opt.IdleCheckFrequency) - } + c.initHooks(hooks{ + dial: nil, + process: c.process, + pipeline: c.processPipeline, + txPipeline: c.processTxPipeline, + }) return c } -func (c *ClusterClient) Context() context.Context { - return c.ctx -} - -func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { - if ctx == nil { - panic("nil context") - } - clone := *c - clone.cmdable = clone.Process - clone.hooks.lock() - clone.ctx = ctx - return &clone -} - // Options returns read-only Options that were used to create the client. func (c *ClusterClient) Options() *ClusterOptions { return c.opt @@ -757,7 +887,7 @@ func (c *ClusterClient) Close() error { return c.nodes.Close() } -// Do creates a Cmd from the args and processes the cmd. +// Do create a Cmd from the args and processes the cmd. func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { cmd := NewCmd(ctx, args...) _ = c.Process(ctx, cmd) @@ -765,13 +895,14 @@ func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { } func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { - return c.hooks.process(ctx, cmd, c.process) + err := c.processHook(ctx, cmd) + cmd.SetErr(err) + return err } func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { - cmdInfo := c.cmdInfo(cmd.Name()) - slot := c.cmdSlot(cmd) - + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + slot := c.cmdSlot(ctx, cmd) var node *clusterNode var ask bool var lastErr error @@ -791,12 +922,12 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { } if ask { + ask = false + pipe := node.Client.Pipeline() _ = pipe.Process(ctx, NewCmd(ctx, "asking")) _ = pipe.Process(ctx, cmd) _, lastErr = pipe.Exec(ctx) - _ = pipe.Close() - ask = false } else { lastErr = node.Client.Process(ctx, cmd) } @@ -851,6 +982,10 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { return lastErr } +func (c *ClusterClient) OnNewNode(fn func(rdb *Client)) { + c.nodes.OnNewNode(fn) +} + // ForEachMaster concurrently calls the fn on each master node in the cluster. // It returns the first error if any. func (c *ClusterClient) ForEachMaster( @@ -1056,30 +1191,9 @@ func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { return nil, firstErr } -// reaper closes idle connections to the cluster. 
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { - ticker := time.NewTicker(idleCheckFrequency) - defer ticker.Stop() - - for range ticker.C { - nodes, err := c.nodes.All() - if err != nil { - break - } - - for _, node := range nodes { - _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() - if err != nil { - internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) - } - } - } -} - func (c *ClusterClient) Pipeline() Pipeliner { pipe := Pipeline{ - ctx: c.ctx, - exec: c.processPipeline, + exec: pipelineExecer(c.processPipelineHook), } pipe.init() return &pipe @@ -1090,13 +1204,9 @@ func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) } func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processPipeline(ctx, cmds, c._processPipeline) -} - -func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { cmdsMap := newCmdsMap() - err := c.mapCmdsByNode(ctx, cmdsMap, cmds) - if err != nil { + + if err := c.mapCmdsByNode(ctx, cmdsMap, cmds); err != nil { setCmdsErr(cmds, err) return err } @@ -1116,18 +1226,7 @@ func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) erro wg.Add(1) go func(node *clusterNode, cmds []Cmder) { defer wg.Done() - - err := c._processPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } + c.processPipelineNode(ctx, node, cmds, failedCmds) }(node, cmds) } @@ -1147,9 +1246,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd return err } - if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { + if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) { for _, cmd := range cmds { - slot := c.cmdSlot(cmd) + slot := c.cmdSlot(ctx, cmd) node, err := c.slotReadOnlyNode(state, slot) if err != nil { return err @@ -1160,7 +1259,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd } for _, cmd := range cmds { - slot := c.cmdSlot(cmd) + slot := c.cmdSlot(ctx, cmd) node, err := state.slotMasterNode(slot) if err != nil { return err @@ -1170,9 +1269,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd return nil } -func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { +func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool { for _, cmd := range cmds { - cmdInfo := c.cmdInfo(cmd.Name()) + cmdInfo := c.cmdInfo(ctx, cmd.Name()) if cmdInfo == nil || !cmdInfo.ReadOnly { return false } @@ -1180,22 +1279,42 @@ func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { return true } -func (c *ClusterClient) _processPipelineNode( +func (c *ClusterClient) processPipelineNode( ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) { + _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + cn, err := node.Client.getConn(ctx) + if err != nil { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + setCmdsErr(cmds, err) + return err + } + + var processErr error + defer func() { + node.Client.releaseConn(ctx, cn, processErr) + }() + processErr = c.processPipelineNodeConn(ctx, node, cn, cmds, failedCmds) + + return processErr + }) +} + +func (c *ClusterClient) processPipelineNodeConn( + ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error 
{ - return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err - } + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }); err != nil { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds, err) + return err + } - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) - }) - }) + return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) }) } @@ -1206,7 +1325,7 @@ func (c *ClusterClient) pipelineReadCmds( cmds []Cmder, failedCmds *cmdsMap, ) error { - for _, cmd := range cmds { + for i, cmd := range cmds { err := cmd.readReply(rd) cmd.SetErr(err) @@ -1218,15 +1337,24 @@ func (c *ClusterClient) pipelineReadCmds( continue } - if c.opt.ReadOnly && isLoadingError(err) { + if c.opt.ReadOnly { node.MarkAsFailing() - return err } - if isRedisError(err) { - continue + + if !isRedisError(err) { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds[i+1:], err) + return err } + } + + if err := cmds[0].Err(); err != nil && shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) return err } + return nil } @@ -1260,8 +1388,10 @@ func (c *ClusterClient) checkMovedErr( // TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. func (c *ClusterClient) TxPipeline() Pipeliner { pipe := Pipeline{ - ctx: c.ctx, - exec: c.processTxPipeline, + exec: func(ctx context.Context, cmds []Cmder) error { + cmds = wrapMultiExec(ctx, cmds) + return c.processTxPipelineHook(ctx, cmds) + }, } pipe.init() return &pipe @@ -1272,10 +1402,6 @@ func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) erro } func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { - return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) -} - -func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { // Trim multi .. exec. 
cmds = cmds[1 : len(cmds)-1] @@ -1285,7 +1411,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er return err } - cmdsMap := c.mapCmdsBySlot(cmds) + cmdsMap := c.mapCmdsBySlot(ctx, cmds) for slot, cmds := range cmdsMap { node, err := state.slotMasterNode(slot) if err != nil { @@ -1309,19 +1435,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er wg.Add(1) go func(node *clusterNode, cmds []Cmder) { defer wg.Done() - - err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) - if err == nil { - return - } - - if attempt < c.opt.MaxRedirects { - if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { - setCmdsErr(cmds, err) - } - } else { - setCmdsErr(cmds, err) - } + c.processTxPipelineNode(ctx, node, cmds, failedCmds) }(node, cmds) } @@ -1336,44 +1450,69 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er return cmdsFirstErr(cmds) } -func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { +func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder { cmdsMap := make(map[int][]Cmder) for _, cmd := range cmds { - slot := c.cmdSlot(cmd) + slot := c.cmdSlot(ctx, cmd) cmdsMap[slot] = append(cmdsMap[slot], cmd) } return cmdsMap } -func (c *ClusterClient) _processTxPipelineNode( +func (c *ClusterClient) processTxPipelineNode( ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) { + cmds = wrapMultiExec(ctx, cmds) + _ = node.Client.withProcessPipelineHook(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + cn, err := node.Client.getConn(ctx) + if err != nil { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + setCmdsErr(cmds, err) + return err + } + + var processErr error + defer func() { + node.Client.releaseConn(ctx, cn, processErr) + }() + processErr = c.processTxPipelineNodeConn(ctx, node, cn, cmds, failedCmds) + + return processErr + }) +} + +func (c *ClusterClient) processTxPipelineNodeConn( + ctx context.Context, node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, ) error { - return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { - return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { - err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { - return writeCmds(wr, cmds) - }) - if err != nil { - return err + if err := cn.WithWriter(c.context(ctx), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }); err != nil { + if shouldRetry(err, true) { + _ = c.mapCmdsByNode(ctx, failedCmds, cmds) + } + setCmdsErr(cmds, err) + return err + } + + return cn.WithReader(c.context(ctx), c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. + trimmedCmds := cmds[1 : len(cmds)-1] + + if err := c.txPipelineReadQueued( + ctx, rd, statusCmd, trimmedCmds, failedCmds, + ); err != nil { + setCmdsErr(cmds, err) + + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, trimmedCmds, moved, ask, addr, failedCmds) } - return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { - statusCmd := cmds[0].(*StatusCmd) - // Trim multi and exec. 
- cmds = cmds[1 : len(cmds)-1] - - err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) - if err != nil { - moved, ask, addr := isMovedError(err) - if moved || ask { - return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) - } - return err - } + return err + } - return pipelineReadCmds(rd, cmds) - }) - }) + return pipelineReadCmds(rd, trimmedCmds) }) } @@ -1406,12 +1545,7 @@ func (c *ClusterClient) txPipelineReadQueued( return err } - switch line[0] { - case proto.ErrorReply: - return proto.ParseErrorReply(line) - case proto.ArrayReply: - // ok - default: + if line[0] != proto.RespArray { return fmt.Errorf("redis: expected '*', but got line %q", line) } @@ -1568,6 +1702,15 @@ func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *Pub return pubsub } +// SSubscribe Subscribes the client to the specified shard channels. +func (c *ClusterClient) SSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.SSubscribe(ctx, channels...) + } + return pubsub +} + func (c *ClusterClient) retryBackoff(attempt int) time.Duration { return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) } @@ -1614,26 +1757,27 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, return nil, firstErr } -func (c *ClusterClient) cmdInfo(name string) *CommandInfo { - cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx) +func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(ctx) if err != nil { + internal.Logger.Printf(context.TODO(), "getting command info: %s", err) return nil } info := cmdsInfo[name] if info == nil { - internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) + internal.Logger.Printf(context.TODO(), "info for cmd=%s not found", name) } return info } -func (c *ClusterClient) cmdSlot(cmd Cmder) int { +func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int { args := cmd.Args() if args[0] == "cluster" && args[1] == "getkeysinslot" { return args[2].(int) } - cmdInfo := c.cmdInfo(cmd.Name()) + cmdInfo := c.cmdInfo(ctx, cmd.Name()) return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) } @@ -1661,7 +1805,7 @@ func (c *ClusterClient) cmdNode( return state.slotMasterNode(slot) } -func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { +func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { if c.opt.RouteByLatency { return state.slotClosestNode(slot) } @@ -1708,6 +1852,13 @@ func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, return node.Client, err } +func (c *ClusterClient) context(ctx context.Context) context.Context { + if c.opt.ContextTimeoutEnabled { + return ctx + } + return context.Background() +} + func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { for _, n := range nodes { if n == node { diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/redis/go-redis/v9/cluster_commands.go similarity index 85% rename from vendor/github.com/go-redis/redis/v8/cluster_commands.go rename to vendor/github.com/redis/go-redis/v9/cluster_commands.go index 085bce83d5..b13f8e7e9d 100644 --- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go +++ b/vendor/github.com/redis/go-redis/v9/cluster_commands.go @@ -8,7 +8,7 @@ import ( func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { cmd := NewIntCmd(ctx, 
"dbsize") - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { var size int64 err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { n, err := master.DBSize(ctx).Result() @@ -30,8 +30,8 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { cmd := NewStringCmd(ctx, "script", "load", script) - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - mu := &sync.Mutex{} + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var mu sync.Mutex err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { val, err := shard.ScriptLoad(ctx, script).Result() if err != nil { @@ -56,7 +56,7 @@ func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCm func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { cmd := NewStatusCmd(ctx, "script", "flush") - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { return shard.ScriptFlush(ctx).Err() }) @@ -82,8 +82,8 @@ func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *Boo result[i] = true } - _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { - mu := &sync.Mutex{} + _ = c.withProcessHook(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var mu sync.Mutex err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { val, err := shard.ScriptExists(ctx, hashes...).Result() if err != nil { diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go new file mode 100644 index 0000000000..f10e7365d6 --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -0,0 +1,5168 @@ +package redis + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/hscan" + "github.com/redis/go-redis/v9/internal/proto" + "github.com/redis/go-redis/v9/internal/util" +) + +type Cmder interface { + Name() string + FullName() string + Args() []interface{} + String() string + stringArg(int) string + firstKeyPos() int8 + SetFirstKeyPos(int8) + + readTimeout() *time.Duration + readReply(rd *proto.Reader) error + + SetErr(error) + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.SetErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmds(wr *proto.Writer, cmds []Cmder) error { + for _, cmd := range cmds { + if err := writeCmd(wr, cmd); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmd Cmder) error { + return wr.WriteArgs(cmd.Args()) +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + if pos := cmd.firstKeyPos(); pos != 0 { + return int(pos) + } + + switch cmd.Name() { + case "eval", "evalsha", "eval_ro", "evalsha_ro": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + case "memory": + // https://github.com/redis/redis/issues/7493 + if cmd.stringArg(1) == "usage" { + return 2 + } + } + + if info != nil { + return 
int(info.FirstKeyPos) + } + return 1 +} + +func cmdString(cmd Cmder, val interface{}) string { + b := make([]byte, 0, 64) + + for i, arg := range cmd.Args() { + if i > 0 { + b = append(b, ' ') + } + b = internal.AppendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } else if val != nil { + b = append(b, ": "...) + b = internal.AppendArg(b, val) + } + + return util.BytesToString(b) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + ctx context.Context + args []interface{} + err error + keyPos int8 + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Name() string { + if len(cmd.args) == 0 { + return "" + } + // Cmd name must be lower cased. + return internal.ToLower(cmd.stringArg(0)) +} + +func (cmd *baseCmd) FullName() string { + switch name := cmd.Name(); name { + case "cluster", "command": + if len(cmd.args) == 1 { + return name + } + if s2, ok := cmd.args[1].(string); ok { + return name + " " + s2 + } + return name + default: + return name + } +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd.args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd.args) { + return "" + } + arg := cmd.args[pos] + switch v := arg.(type) { + case string: + return v + default: + // TODO: consider using appendArg + return fmt.Sprint(v) + } +} + +func (cmd *baseCmd) firstKeyPos() int8 { + return cmd.keyPos +} + +func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { + cmd.keyPos = keyPos +} + +func (cmd *baseCmd) SetErr(e error) { + cmd.err = e +} + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(ctx context.Context, args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) SetVal(val interface{}) { + cmd.val = val +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) Text() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + return toString(cmd.val) +} + +func toString(val interface{}) (string, error) { + switch val := val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toInt64(cmd.val) +} + +func toInt64(val interface{}) (int64, error) { + switch val := val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return 
toUint64(cmd.val) +} + +func toUint64(val interface{}) (uint64, error) { + switch val := val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toFloat32(cmd.val) +} + +func toFloat32(val interface{}) (float32, error) { + switch val := val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toFloat64(cmd.val) +} + +func toFloat64(val interface{}) (float64, error) { + switch val := val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return toBool(cmd.val) +} + +func toBool(val interface{}) (bool, error) { + switch val := val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) Slice() ([]interface{}, error) { + if cmd.err != nil { + return nil, cmd.err + } + switch val := cmd.val.(type) { + case []interface{}: + return val, nil + default: + return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) + } +} + +func (cmd *Cmd) StringSlice() ([]string, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + ss := make([]string, len(slice)) + for i, iface := range slice { + val, err := toString(iface) + if err != nil { + return nil, err + } + ss[i] = val + } + return ss, nil +} + +func (cmd *Cmd) Int64Slice() ([]int64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]int64, len(slice)) + for i, iface := range slice { + val, err := toInt64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Uint64Slice() ([]uint64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]uint64, len(slice)) + for i, iface := range slice { + val, err := toUint64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Float32Slice() ([]float32, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float32, len(slice)) + for i, iface := range slice { + val, err := toFloat32(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) Float64Slice() ([]float64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float64, len(slice)) + for i, iface := range slice { + val, err := toFloat64(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) BoolSlice() ([]bool, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + bools := make([]bool, len(slice)) + for i, iface := range slice { + val, err := toBool(iface) + 
if err != nil { + return nil, err + } + bools[i] = val + } + return bools, nil +} + +func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadReply() + return err +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SliceCmd) SetVal(val []interface{}) { + cmd.val = val +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *SliceCmd) Scan(dst interface{}) error { + if cmd.err != nil { + return cmd.err + } + + // Pass the list of keys and values. + // Skip the first two args for: HMGET key + var args []interface{} + if cmd.args[0] == "hmget" { + args = cmd.args[2:] + } else { + // Otherwise, it's: MGET field field ... + args = cmd.args[1:] + } + + return hscan.Scan(dst, args, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadSlice() + return err +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StatusCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntCmd) SetVal(val int64) { + cmd.val = val +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) Uint64() (uint64, error) { + return uint64(cmd.val), cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadInt() + return err +} + +//------------------------------------------------------------------------------ + +type IntSliceCmd struct { + baseCmd + + val []int64 +} + +var _ Cmder = (*IntSliceCmd)(nil) + +func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { + return &IntSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntSliceCmd) SetVal(val []int64) { + cmd.val = val +} + +func (cmd *IntSliceCmd) Val() []int64 { + return cmd.val +} + +func (cmd *IntSliceCmd) Result() ([]int64, error) { + return cmd.val, cmd.err +} + +func (cmd 
*IntSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]int64, n) + for i := 0; i < len(cmd.val); i++ { + if cmd.val[i], err = rd.ReadInt(); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + precision: precision, + } +} + +func (cmd *DurationCmd) SetVal(val time.Duration) { + cmd.val = val +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadInt() + if err != nil { + return err + } + switch n { + // -2 if the key does not exist + // -1 if the key exists but has no associated expire + case -2, -1: + cmd.val = time.Duration(n) + default: + cmd.val = time.Duration(n) * cmd.precision + } + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TimeCmd) SetVal(val time.Time) { + cmd.val = val +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + second, err := rd.ReadInt() + if err != nil { + return err + } + microsecond, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val = time.Unix(second, microsecond*1000) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolCmd) SetVal(val bool) { + cmd.val = val +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadBool() + + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. 
+ if err == Nil { + cmd.val = false + err = nil + } + return err +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + +func (cmd *StringCmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return strconv.ParseBool(cmd.val) +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Time() (time.Time, error) { + if cmd.err != nil { + return time.Time{}, cmd.err + } + return time.Parse(time.RFC3339Nano, cmd.Val()) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatCmd) SetVal(val float64) { + cmd.val = val +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadFloat() + return err +} + +//------------------------------------------------------------------------------ + +type FloatSliceCmd struct { + baseCmd + + val []float64 +} + +var _ Cmder = (*FloatSliceCmd)(nil) + +func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { + return &FloatSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatSliceCmd) SetVal(val []float64) { + cmd.val = val +} + +func (cmd *FloatSliceCmd) Val() []float64 { + return cmd.val +} + +func (cmd *FloatSliceCmd) Result() ([]float64, error) { + return cmd.val, cmd.err +} + +func (cmd *FloatSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatSliceCmd) readReply(rd 
*proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]float64, n) + for i := 0; i < len(cmd.val); i++ { + switch num, err := rd.ReadFloat(); { + case err == Nil: + cmd.val[i] = 0 + case err != nil: + return err + default: + cmd.val[i] = num + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringSliceCmd) SetVal(val []string) { + cmd.val = val +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]string, n) + for i := 0; i < len(cmd.val); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.val[i] = "" + case err != nil: + return err + default: + cmd.val[i] = s + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type KeyValue struct { + Key string + Value string +} + +type KeyValueSliceCmd struct { + baseCmd + + val []KeyValue +} + +var _ Cmder = (*KeyValueSliceCmd)(nil) + +func NewKeyValueSliceCmd(ctx context.Context, args ...interface{}) *KeyValueSliceCmd { + return &KeyValueSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyValueSliceCmd) SetVal(val []KeyValue) { + cmd.val = val +} + +func (cmd *KeyValueSliceCmd) Val() []KeyValue { + return cmd.val +} + +func (cmd *KeyValueSliceCmd) Result() ([]KeyValue, error) { + return cmd.val, cmd.err +} + +func (cmd *KeyValueSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Many commands will respond to two formats: +// 1. 1) "one" +// 2. (double) 1 +// 2. 1) "two" +// 2. (double) 2 +// +// OR: +// 1. "two" +// 2. (double) 2 +// 3. "one" +// 4. (double) 1 +func (cmd *KeyValueSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + // If the n is 0, can't continue reading. 
+ if n == 0 { + cmd.val = make([]KeyValue, 0) + return nil + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]KeyValue, n) + } else { + cmd.val = make([]KeyValue, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Key, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Value, err = rd.ReadString(); err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolSliceCmd) SetVal(val []bool) { + cmd.val = val +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]bool, n) + for i := 0; i < len(cmd.val); i++ { + if cmd.val[i], err = rd.ReadBool(); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type MapStringStringCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*MapStringStringCmd)(nil) + +func NewMapStringStringCmd(ctx context.Context, args ...interface{}) *MapStringStringCmd { + return &MapStringStringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringStringCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *MapStringStringCmd) SetVal(val map[string]string) { + cmd.val = val +} + +func (cmd *MapStringStringCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *MapStringStringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. 
+func (cmd *MapStringStringCmd) Scan(dest interface{}) error { + if cmd.err != nil { + return cmd.err + } + + strct, err := hscan.Struct(dest) + if err != nil { + return err + } + + for k, v := range cmd.val { + if err := strct.Scan(k, v); err != nil { + return err + } + } + + return nil +} + +func (cmd *MapStringStringCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make(map[string]string, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + value, err := rd.ReadString() + if err != nil { + return err + } + + cmd.val[key] = value + } + return nil +} + +//------------------------------------------------------------------------------ + +type MapStringIntCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*MapStringIntCmd)(nil) + +func NewMapStringIntCmd(ctx context.Context, args ...interface{}) *MapStringIntCmd { + return &MapStringIntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringIntCmd) SetVal(val map[string]int64) { + cmd.val = val +} + +func (cmd *MapStringIntCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *MapStringIntCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *MapStringIntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringIntCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make(map[string]int64, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + nn, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[key] = nn + } + return nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { + cmd.val = val +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make(map[string]struct{}, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[key] = struct{}{} + } + return nil +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { + cmd.val = val +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return 
cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = readXMessageSlice(rd) + return err +} + +func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + msgs := make([]XMessage, n) + for i := 0; i < len(msgs); i++ { + if msgs[i], err = readXMessage(rd); err != nil { + return nil, err + } + } + return msgs, nil +} + +func readXMessage(rd *proto.Reader) (XMessage, error) { + if err := rd.ReadFixedArrayLen(2); err != nil { + return XMessage{}, err + } + + id, err := rd.ReadString() + if err != nil { + return XMessage{}, err + } + + v, err := stringInterfaceMapParser(rd) + if err != nil { + if err != proto.Nil { + return XMessage{}, err + } + } + + return XMessage{ + ID: id, + Values: v, + }, nil +} + +func stringInterfaceMapParser(rd *proto.Reader) (map[string]interface{}, error) { + n, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + m := make(map[string]interface{}, n) + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XStreamSliceCmd) SetVal(val []XStream) { + cmd.val = val +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + + var n int + if typ == proto.RespMap { + n, err = rd.ReadMapLen() + } else { + n, err = rd.ReadArrayLen() + } + if err != nil { + return err + } + cmd.val = make([]XStream, n) + for i := 0; i < len(cmd.val); i++ { + if typ != proto.RespMap { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + if cmd.val[i].Stream, err = rd.ReadString(); err != nil { + return err + } + if cmd.val[i].Messages, err = readXMessageSlice(rd); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = (*XPendingCmd)(nil) + +func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingCmd) SetVal(val *XPending) { + cmd.val = val +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + var err error + if err = rd.ReadFixedArrayLen(4); err != nil { + return err + } + cmd.val = &XPending{} + + if 
cmd.val.Count, err = rd.ReadInt(); err != nil { + return err + } + + if cmd.val.Lower, err = rd.ReadString(); err != nil && err != Nil { + return err + } + + if cmd.val.Higher, err = rd.ReadString(); err != nil && err != Nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil && err != Nil { + return err + } + cmd.val.Consumers = make(map[string]int64, n) + for i := 0; i < n; i++ { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + consumerName, err := rd.ReadString() + if err != nil { + return err + } + consumerPending, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val.Consumers[consumerName] = consumerPending + } + return nil +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + ID string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { + cmd.val = val +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]XPendingExt, n) + + for i := 0; i < len(cmd.val); i++ { + if err = rd.ReadFixedArrayLen(4); err != nil { + return err + } + + if cmd.val[i].ID, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Consumer, err = rd.ReadString(); err != nil && err != Nil { + return err + } + + idle, err := rd.ReadInt() + if err != nil && err != Nil { + return err + } + cmd.val[i].Idle = time.Duration(idle) * time.Millisecond + + if cmd.val[i].RetryCount, err = rd.ReadInt(); err != nil && err != Nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XAutoClaimCmd struct { + baseCmd + + start string + val []XMessage +} + +var _ Cmder = (*XAutoClaimCmd)(nil) + +func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { + return &XAutoClaimCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + switch n { + case 2, // Redis 6 + 3: // Redis 7: + // ok + default: + return fmt.Errorf("redis: got %d elements in XAutoClaim reply, wanted 2/3", n) + } + + cmd.start, err = rd.ReadString() + if err != nil { + return err + } + + cmd.val, err = readXMessageSlice(rd) + if err != nil { + return err + } + + if n >= 3 { + if err := rd.DiscardNext(); err != nil { + return err + } + } + + return nil +} + 
+//------------------------------------------------------------------------------ + +type XAutoClaimJustIDCmd struct { + baseCmd + + start string + val []string +} + +var _ Cmder = (*XAutoClaimJustIDCmd)(nil) + +func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { + return &XAutoClaimJustIDCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimJustIDCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + switch n { + case 2, // Redis 6 + 3: // Redis 7: + // ok + default: + return fmt.Errorf("redis: got %d elements in XAutoClaimJustID reply, wanted 2/3", n) + } + + cmd.start, err = rd.ReadString() + if err != nil { + return err + } + + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]string, nn) + for i := 0; i < nn; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return err + } + } + + if n >= 3 { + if err := rd.DiscardNext(); err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoConsumersCmd struct { + baseCmd + val []XInfoConsumer +} + +type XInfoConsumer struct { + Name string + Pending int64 + Idle time.Duration + Inactive time.Duration +} + +var _ Cmder = (*XInfoConsumersCmd)(nil) + +func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { + return &XInfoConsumersCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "consumers", stream, group}, + }, + } +} + +func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { + cmd.val = val +} + +func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { + return cmd.val +} + +func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoConsumersCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]XInfoConsumer, n) + + for i := 0; i < len(cmd.val); i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + for f := 0; f < nn; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "name": + cmd.val[i].Name, err = rd.ReadString() + case "pending": + cmd.val[i].Pending, err = rd.ReadInt() + case "idle": + var idle int64 + idle, err = rd.ReadInt() + cmd.val[i].Idle = time.Duration(idle) * time.Millisecond + case "inactive": + var inactive int64 + inactive, err = rd.ReadInt() + cmd.val[i].Inactive = time.Duration(inactive) * time.Millisecond + default: + return fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) + } + if err != nil { + return err + } + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoGroupsCmd struct { + baseCmd + val []XInfoGroup +} + +type XInfoGroup struct { + Name string + Consumers int64 + Pending int64 + LastDeliveredID 
string + EntriesRead int64 + Lag int64 +} + +var _ Cmder = (*XInfoGroupsCmd)(nil) + +func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { + return &XInfoGroupsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "groups", stream}, + }, + } +} + +func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { + cmd.val = val +} + +func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { + return cmd.val +} + +func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoGroupsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]XInfoGroup, n) + + for i := 0; i < len(cmd.val); i++ { + group := &cmd.val[i] + + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + for j := 0; j < nn; j++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + if err != nil { + return err + } + case "consumers": + group.Consumers, err = rd.ReadInt() + if err != nil { + return err + } + case "pending": + group.Pending, err = rd.ReadInt() + if err != nil { + return err + } + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-read": + group.EntriesRead, err = rd.ReadInt() + if err != nil && err != Nil { + return err + } + case "lag": + group.Lag, err = rd.ReadInt() + + // lag: the number of entries in the stream that are still waiting to be delivered + // to the group's consumers, or a NULL(Nil) when that number can't be determined. + if err != nil && err != Nil { + return err + } + default: + return fmt.Errorf("redis: unexpected key %q in XINFO GROUPS reply", key) + } + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamCmd struct { + baseCmd + val *XInfoStream +} + +type XInfoStream struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + Groups int64 + LastGeneratedID string + MaxDeletedEntryID string + EntriesAdded int64 + FirstEntry XMessage + LastEntry XMessage + RecordedFirstEntryID string +} + +var _ Cmder = (*XInfoStreamCmd)(nil) + +func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd { + return &XInfoStreamCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "stream", stream}, + }, + } +} + +func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { + cmd.val = val +} + +func (cmd *XInfoStreamCmd) Val() *XInfoStream { + return cmd.val +} + +func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val = &XInfoStream{} + + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + switch key { + case "length": + cmd.val.Length, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadInt() + if err != nil { + return err + } + case "groups": + cmd.val.Groups, err = rd.ReadInt() + if err != nil { + return err + } + case "last-generated-id": + 
cmd.val.LastGeneratedID, err = rd.ReadString() + if err != nil { + return err + } + case "max-deleted-entry-id": + cmd.val.MaxDeletedEntryID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-added": + cmd.val.EntriesAdded, err = rd.ReadInt() + if err != nil { + return err + } + case "first-entry": + cmd.val.FirstEntry, err = readXMessage(rd) + if err != nil && err != Nil { + return err + } + case "last-entry": + cmd.val.LastEntry, err = readXMessage(rd) + if err != nil && err != Nil { + return err + } + case "recorded-first-entry-id": + cmd.val.RecordedFirstEntryID, err = rd.ReadString() + if err != nil { + return err + } + default: + return fmt.Errorf("redis: unexpected key %q in XINFO STREAM reply", key) + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamFullCmd struct { + baseCmd + val *XInfoStreamFull +} + +type XInfoStreamFull struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + LastGeneratedID string + MaxDeletedEntryID string + EntriesAdded int64 + Entries []XMessage + Groups []XInfoStreamGroup + RecordedFirstEntryID string +} + +type XInfoStreamGroup struct { + Name string + LastDeliveredID string + EntriesRead int64 + Lag int64 + PelCount int64 + Pending []XInfoStreamGroupPending + Consumers []XInfoStreamConsumer +} + +type XInfoStreamGroupPending struct { + ID string + Consumer string + DeliveryTime time.Time + DeliveryCount int64 +} + +type XInfoStreamConsumer struct { + Name string + SeenTime time.Time + ActiveTime time.Time + PelCount int64 + Pending []XInfoStreamConsumerPending +} + +type XInfoStreamConsumerPending struct { + ID string + DeliveryTime time.Time + DeliveryCount int64 +} + +var _ Cmder = (*XInfoStreamFullCmd)(nil) + +func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { + return &XInfoStreamFullCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { + cmd.val = val +} + +func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { + return cmd.val +} + +func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamFullCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = &XInfoStreamFull{} + + for i := 0; i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "length": + cmd.val.Length, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadInt() + if err != nil { + return err + } + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadInt() + if err != nil { + return err + } + case "last-generated-id": + cmd.val.LastGeneratedID, err = rd.ReadString() + if err != nil { + return err + } + case "entries-added": + cmd.val.EntriesAdded, err = rd.ReadInt() + if err != nil { + return err + } + case "entries": + cmd.val.Entries, err = readXMessageSlice(rd) + if err != nil { + return err + } + case "groups": + cmd.val.Groups, err = readStreamGroups(rd) + if err != nil { + return err + } + case "max-deleted-entry-id": + cmd.val.MaxDeletedEntryID, err = rd.ReadString() + if err != nil { + return err + } + case "recorded-first-entry-id": + cmd.val.RecordedFirstEntryID, err = rd.ReadString() + if err != nil { + return err + } + 
default: + return fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) + } + } + return nil +} + +func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + groups := make([]XInfoStreamGroup, 0, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + group := XInfoStreamGroup{} + + for j := 0; j < nn; j++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + if err != nil { + return nil, err + } + case "entries-read": + group.EntriesRead, err = rd.ReadInt() + if err != nil && err != Nil { + return nil, err + } + case "lag": + // lag: the number of entries in the stream that are still waiting to be delivered + // to the group's consumers, or a NULL(Nil) when that number can't be determined. + group.Lag, err = rd.ReadInt() + if err != nil && err != Nil { + return nil, err + } + case "pel-count": + group.PelCount, err = rd.ReadInt() + if err != nil { + return nil, err + } + case "pending": + group.Pending, err = readXInfoStreamGroupPending(rd) + if err != nil { + return nil, err + } + case "consumers": + group.Consumers, err = readXInfoStreamConsumers(rd) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("redis: unexpected key %q in XINFO STREAM FULL reply", key) + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + pending := make([]XInfoStreamGroupPending, 0, n) + + for i := 0; i < n; i++ { + if err = rd.ReadFixedArrayLen(4); err != nil { + return nil, err + } + + p := XInfoStreamGroupPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + p.Consumer, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadInt() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadInt() + if err != nil { + return nil, err + } + + pending = append(pending, p) + } + + return pending, nil +} + +func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + consumers := make([]XInfoStreamConsumer, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + c := XInfoStreamConsumer{} + + for f := 0; f < nn; f++ { + cKey, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch cKey { + case "name": + c.Name, err = rd.ReadString() + case "seen-time": + seen, err := rd.ReadInt() + if err != nil { + return nil, err + } + c.SeenTime = time.UnixMilli(seen) + case "active-time": + active, err := rd.ReadInt() + if err != nil { + return nil, err + } + c.ActiveTime = time.UnixMilli(active) + case "pel-count": + c.PelCount, err = rd.ReadInt() + case "pending": + pendingNumber, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) + + for pn := 0; pn < pendingNumber; pn++ { + if err = rd.ReadFixedArrayLen(3); err != nil { + return nil, err + } + + p := 
XInfoStreamConsumerPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadInt() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadInt() + if err != nil { + return nil, err + } + + c.Pending = append(c.Pending, p) + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM FULL reply", cKey) + } + if err != nil { + return nil, err + } + } + consumers = append(consumers, c) + } + + return consumers, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceCmd) SetVal(val []Z) { + cmd.val = val +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { // nolint:dupl + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + // If the n is 0, can't continue reading. + if n == 0 { + cmd.val = make([]Z, 0) + return nil + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]Z, n) + } else { + cmd.val = make([]Z, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Member, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val *ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { + cmd.val = val +} + +func (cmd *ZWithKeyCmd) Val() *ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(3); err != nil { + return err + } + cmd.val = &ZWithKey{} + + if cmd.val.Key, err = rd.ReadString(); err != nil { + return err + } + if cmd.val.Member, err = rd.ReadString(); err != nil { + return err + } + if cmd.val.Score, err = rd.ReadFloat(); err != nil { + return err + } + + return nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process cmdable +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + process: process, + } +} + +func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { + cmd.page = page + cmd.cursor = cursor +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return 
cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cursor, err := rd.ReadUint() + if err != nil { + return err + } + cmd.cursor = cursor + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.page = make([]string, n) + + for i := 0; i < len(cmd.page); i++ { + if cmd.page[i], err = rd.ReadString(); err != nil { + return err + } + } + return nil +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + ID string + Addr string + NetworkingMetadata map[string]string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { + cmd.val = val +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterSlot, n) + + for i := 0; i < len(cmd.val); i++ { + n, err = rd.ReadArrayLen() + if err != nil { + return err + } + if n < 2 { + return fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + } + + start, err := rd.ReadInt() + if err != nil { + return err + } + + end, err := rd.ReadInt() + if err != nil { + return err + } + + // subtract start and end. + nodes := make([]ClusterNode, n-2) + + for j := 0; j < len(nodes); j++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn < 2 || nn > 4 { + return fmt.Errorf("got %d elements in cluster info address, expected 2, 3, or 4", n) + } + + ip, err := rd.ReadString() + if err != nil { + return err + } + + port, err := rd.ReadString() + if err != nil { + return err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if nn >= 3 { + id, err := rd.ReadString() + if err != nil { + return err + } + nodes[j].ID = id + } + + if nn >= 4 { + metadataLength, err := rd.ReadMapLen() + if err != nil { + return err + } + + networkingMetadata := make(map[string]string, metadataLength) + + for i := 0; i < metadataLength; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + value, err := rd.ReadString() + if err != nil { + return err + } + networkingMetadata[key] = value + } + + nodes[j].NetworkingMetadata = networkingMetadata + } + } + + cmd.val[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. 
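+//
+// A minimal usage sketch (illustrative only; it assumes a context.Context ctx and
+// an initialized *Client rdb, neither of which is defined in this file):
+//
+//	rdb.GeoAdd(ctx, "stations", &GeoLocation{Name: "central", Longitude: 13.37, Latitude: 52.52})
+//	locs, err := rdb.GeoRadius(ctx, "stations", 13.37, 52.52, &GeoRadiusQuery{Radius: 5, Unit: "km", WithDist: true}).Result()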
+type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. + Sort string + Store string + StoreDist string + + // WithCoord+WithDist+WithGeoHash + withLen int +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + return &GeoLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: geoLocationArgs(q, args...), + }, + q: q, + } +} + +func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + q.withLen++ + } + if q.WithDist { + args = append(args, "withdist") + q.withLen++ + } + if q.WithGeoHash { + args = append(args, "withhash") + q.withLen++ + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return args +} + +func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { + cmd.locations = locations +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.locations = make([]GeoLocation, n) + + for i := 0; i < len(cmd.locations); i++ { + // only name + if cmd.q.withLen == 0 { + if cmd.locations[i].Name, err = rd.ReadString(); err != nil { + return err + } + continue + } + + // +name + if err = rd.ReadFixedArrayLen(cmd.q.withLen + 1); err != nil { + return err + } + + if cmd.locations[i].Name, err = rd.ReadString(); err != nil { + return err + } + if cmd.q.WithDist { + if cmd.locations[i].Dist, err = rd.ReadFloat(); err != nil { + return err + } + } + if cmd.q.WithGeoHash { + if cmd.locations[i].GeoHash, err = rd.ReadInt(); err != nil { + return err + } + } + if cmd.q.WithCoord { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + if cmd.locations[i].Longitude, err = rd.ReadFloat(); err != nil { + return err + } + if cmd.locations[i].Latitude, err = rd.ReadFloat(); err != nil { + return err + } + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. +type GeoSearchQuery struct { + Member string + + // Latitude and Longitude when using FromLonLat option. + Longitude float64 + Latitude float64 + + // Distance and unit when using ByRadius option. + // Can use m, km, ft, or mi. Default is km. + Radius float64 + RadiusUnit string + + // Height, width and unit when using ByBox option. + // Can be m, km, ft, or mi. Default is km. 
+ BoxWidth float64 + BoxHeight float64 + BoxUnit string + + // Can be ASC or DESC. Default is no sort order. + Sort string + Count int + CountAny bool +} + +type GeoSearchLocationQuery struct { + GeoSearchQuery + + WithCoord bool + WithDist bool + WithHash bool +} + +type GeoSearchStoreQuery struct { + GeoSearchQuery + + // When using the StoreDist option, the command stores the items in a + // sorted set populated with their distance from the center of the circle or box, + // as a floating-point number, in the same unit specified for that shape. + StoreDist bool +} + +func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { + args = geoSearchArgs(&q.GeoSearchQuery, args) + + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithHash { + args = append(args, "withhash") + } + + return args +} + +func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { + if q.Member != "" { + args = append(args, "frommember", q.Member) + } else { + args = append(args, "fromlonlat", q.Longitude, q.Latitude) + } + + if q.Radius > 0 { + if q.RadiusUnit == "" { + q.RadiusUnit = "km" + } + args = append(args, "byradius", q.Radius, q.RadiusUnit) + } else { + if q.BoxUnit == "" { + q.BoxUnit = "km" + } + args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) + } + + if q.Sort != "" { + args = append(args, q.Sort) + } + + if q.Count > 0 { + args = append(args, "count", q.Count) + if q.CountAny { + args = append(args, "any") + } + } + + return args +} + +type GeoSearchLocationCmd struct { + baseCmd + + opt *GeoSearchLocationQuery + val []GeoLocation +} + +var _ Cmder = (*GeoSearchLocationCmd)(nil) + +func NewGeoSearchLocationCmd( + ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, +) *GeoSearchLocationCmd { + return &GeoSearchLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + opt: opt, + } +} + +func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { + cmd.val = val +} + +func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { + return cmd.val +} + +func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { + return cmd.val, cmd.err +} + +func (cmd *GeoSearchLocationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]GeoLocation, n) + for i := 0; i < n; i++ { + _, err = rd.ReadArrayLen() + if err != nil { + return err + } + + var loc GeoLocation + + loc.Name, err = rd.ReadString() + if err != nil { + return err + } + if cmd.opt.WithDist { + loc.Dist, err = rd.ReadFloat() + if err != nil { + return err + } + } + if cmd.opt.WithHash { + loc.GeoHash, err = rd.ReadInt() + if err != nil { + return err + } + } + if cmd.opt.WithCoord { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + loc.Longitude, err = rd.ReadFloat() + if err != nil { + return err + } + loc.Latitude, err = rd.ReadFloat() + if err != nil { + return err + } + } + + cmd.val[i] = loc + } + + return nil +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + val []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd 
*GeoPosCmd) SetVal(val []*GeoPos) { + cmd.val = val +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.val +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]*GeoPos, n) + + for i := 0; i < len(cmd.val); i++ { + err = rd.ReadFixedArrayLen(2) + if err != nil { + if err == Nil { + cmd.val[i] = nil + continue + } + return err + } + + longitude, err := rd.ReadFloat() + if err != nil { + return err + } + latitude, err := rd.ReadFloat() + if err != nil { + return err + } + + cmd.val[i] = &GeoPos{ + Longitude: longitude, + Latitude: latitude, + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { + cmd.val = val +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + const numArgRedis5 = 6 + const numArgRedis6 = 7 + const numArgRedis7 = 10 + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make(map[string]*CommandInfo, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + + switch nn { + case numArgRedis5, numArgRedis6, numArgRedis7: + // ok + default: + return fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6/7/10", nn) + } + + cmdInfo := &CommandInfo{} + if cmdInfo.Name, err = rd.ReadString(); err != nil { + return err + } + + arity, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.Arity = int8(arity) + + flagLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmdInfo.Flags = make([]string, flagLen) + for f := 0; f < len(cmdInfo.Flags); f++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmdInfo.Flags[f] = "" + case err != nil: + return err + default: + if !cmdInfo.ReadOnly && s == "readonly" { + cmdInfo.ReadOnly = true + } + cmdInfo.Flags[f] = s + } + } + + firstKeyPos, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadInt() + if err != nil { + return err + } + cmdInfo.StepCount = int8(stepCount) + + if nn >= numArgRedis6 { + aclFlagLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmdInfo.ACLFlags = make([]string, aclFlagLen) + for f := 0; f < len(cmdInfo.ACLFlags); f++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmdInfo.ACLFlags[f] = "" + case err != nil: + return err + default: + cmdInfo.ACLFlags[f] = s + } + } + } + + if 
nn >= numArgRedis7 { + if err := rd.DiscardNext(); err != nil { + return err + } + if err := rd.DiscardNext(); err != nil { + return err + } + if err := rd.DiscardNext(); err != nil { + return err + } + } + + cmd.val[cmdInfo.Name] = cmdInfo + } + + return nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func(ctx context.Context) (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn(ctx) + if err != nil { + return err + } + + // Extensions have cmd names in upper case. Convert them to lower case. + for k, v := range cmds { + lower := internal.ToLower(k) + if lower != k { + cmds[lower] = v + } + } + + c.cmds = cmds + return nil + }) + return c.cmds, err +} + +//------------------------------------------------------------------------------ + +type SlowLog struct { + ID int64 + Time time.Time + Duration time.Duration + Args []string + // These are also optional fields emitted only by Redis 4.0 or greater: + // https://redis.io/commands/slowlog#output-format + ClientAddr string + ClientName string +} + +type SlowLogCmd struct { + baseCmd + + val []SlowLog +} + +var _ Cmder = (*SlowLogCmd)(nil) + +func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { + return &SlowLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SlowLogCmd) SetVal(val []SlowLog) { + cmd.val = val +} + +func (cmd *SlowLogCmd) Val() []SlowLog { + return cmd.val +} + +func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *SlowLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]SlowLog, n) + + for i := 0; i < len(cmd.val); i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn < 4 { + return fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", nn) + } + + if cmd.val[i].ID, err = rd.ReadInt(); err != nil { + return err + } + + createdAt, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Time = time.Unix(createdAt, 0) + + costs, err := rd.ReadInt() + if err != nil { + return err + } + cmd.val[i].Duration = time.Duration(costs) * time.Microsecond + + cmdLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + if cmdLen < 1 { + return fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) + } + + cmd.val[i].Args = make([]string, cmdLen) + for f := 0; f < len(cmd.val[i].Args); f++ { + cmd.val[i].Args[f], err = rd.ReadString() + if err != nil { + return err + } + } + + if nn >= 5 { + if cmd.val[i].ClientAddr, err = rd.ReadString(); err != nil { + return err + } + } + + if nn >= 6 { + if cmd.val[i].ClientName, err = rd.ReadString(); err != nil { + return err + } + } + } + + return nil +} + +//----------------------------------------------------------------------- + +type MapStringInterfaceCmd struct { + baseCmd + + val map[string]interface{} +} + +var _ Cmder = (*MapStringInterfaceCmd)(nil) + +func NewMapStringInterfaceCmd(ctx context.Context, args ...interface{}) 
*MapStringInterfaceCmd { + return &MapStringInterfaceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapStringInterfaceCmd) Val() map[string]interface{} { + return cmd.val +} + +func (cmd *MapStringInterfaceCmd) Result() (map[string]interface{}, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *MapStringInterfaceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringInterfaceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + cmd.val = make(map[string]interface{}, n) + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + cmd.val[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + cmd.val[k] = err + continue + } + return err + } + cmd.val[k] = v + } + return nil +} + +//----------------------------------------------------------------------- + +type MapStringStringSliceCmd struct { + baseCmd + + val []map[string]string +} + +var _ Cmder = (*MapStringStringSliceCmd)(nil) + +func NewMapStringStringSliceCmd(ctx context.Context, args ...interface{}) *MapStringStringSliceCmd { + return &MapStringStringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapStringStringSliceCmd) SetVal(val []map[string]string) { + cmd.val = val +} + +func (cmd *MapStringStringSliceCmd) Val() []map[string]string { + return cmd.val +} + +func (cmd *MapStringStringSliceCmd) Result() ([]map[string]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *MapStringStringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]map[string]string, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + cmd.val[i] = make(map[string]string, nn) + for f := 0; f < nn; f++ { + k, err := rd.ReadString() + if err != nil { + return err + } + + v, err := rd.ReadString() + if err != nil { + return err + } + cmd.val[i][k] = v + } + } + return nil +} + +//------------------------------------------------------------------------------ + +type KeyValuesCmd struct { + baseCmd + + key string + val []string +} + +var _ Cmder = (*KeyValuesCmd)(nil) + +func NewKeyValuesCmd(ctx context.Context, args ...interface{}) *KeyValuesCmd { + return &KeyValuesCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyValuesCmd) SetVal(key string, val []string) { + cmd.key = key + cmd.val = val +} + +func (cmd *KeyValuesCmd) Val() (string, []string) { + return cmd.key, cmd.val +} + +func (cmd *KeyValuesCmd) Result() (string, []string, error) { + return cmd.key, cmd.val, cmd.err +} + +func (cmd *KeyValuesCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *KeyValuesCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cmd.key, err = rd.ReadString() + if err != nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]string, n) + for i := 0; i < n; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return err + } + } + + return nil +} + +//------------------------------------------------------------------------------ + +type 
ZSliceWithKeyCmd struct { + baseCmd + + key string + val []Z +} + +var _ Cmder = (*ZSliceWithKeyCmd)(nil) + +func NewZSliceWithKeyCmd(ctx context.Context, args ...interface{}) *ZSliceWithKeyCmd { + return &ZSliceWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceWithKeyCmd) SetVal(key string, val []Z) { + cmd.key = key + cmd.val = val +} + +func (cmd *ZSliceWithKeyCmd) Val() (string, []Z) { + return cmd.key, cmd.val +} + +func (cmd *ZSliceWithKeyCmd) Result() (string, []Z, error) { + return cmd.key, cmd.val, cmd.err +} + +func (cmd *ZSliceWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceWithKeyCmd) readReply(rd *proto.Reader) (err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + cmd.key, err = rd.ReadString() + if err != nil { + return err + } + + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + typ, err := rd.PeekReplyType() + if err != nil { + return err + } + array := typ == proto.RespArray + + if array { + cmd.val = make([]Z, n) + } else { + cmd.val = make([]Z, n/2) + } + + for i := 0; i < len(cmd.val); i++ { + if array { + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + } + + if cmd.val[i].Member, err = rd.ReadString(); err != nil { + return err + } + + if cmd.val[i].Score, err = rd.ReadFloat(); err != nil { + return err + } + } + + return nil +} + +type Function struct { + Name string + Description string + Flags []string +} + +type Library struct { + Name string + Engine string + Functions []Function + Code string +} + +type FunctionListCmd struct { + baseCmd + + val []Library +} + +var _ Cmder = (*FunctionListCmd)(nil) + +func NewFunctionListCmd(ctx context.Context, args ...interface{}) *FunctionListCmd { + return &FunctionListCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FunctionListCmd) SetVal(val []Library) { + cmd.val = val +} + +func (cmd *FunctionListCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FunctionListCmd) Val() []Library { + return cmd.val +} + +func (cmd *FunctionListCmd) Result() ([]Library, error) { + return cmd.val, cmd.err +} + +func (cmd *FunctionListCmd) First() (*Library, error) { + if cmd.err != nil { + return nil, cmd.err + } + if len(cmd.val) > 0 { + return &cmd.val[0], nil + } + return nil, Nil +} + +func (cmd *FunctionListCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + libraries := make([]Library, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return err + } + + library := Library{} + for f := 0; f < nn; f++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "library_name": + library.Name, err = rd.ReadString() + case "engine": + library.Engine, err = rd.ReadString() + case "functions": + library.Functions, err = cmd.readFunctions(rd) + case "library_code": + library.Code, err = rd.ReadString() + default: + return fmt.Errorf("redis: function list unexpected key %s", key) + } + + if err != nil { + return err + } + } + + libraries[i] = library + } + cmd.val = libraries + return nil +} + +func (cmd *FunctionListCmd) readFunctions(rd *proto.Reader) ([]Function, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + functions := make([]Function, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + function := Function{} + for f := 0; f < nn; f++ { + 
key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + if function.Name, err = rd.ReadString(); err != nil { + return nil, err + } + case "description": + if function.Description, err = rd.ReadString(); err != nil && err != Nil { + return nil, err + } + case "flags": + // resp set + nx, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + function.Flags = make([]string, nx) + for j := 0; j < nx; j++ { + if function.Flags[j], err = rd.ReadString(); err != nil { + return nil, err + } + } + default: + return nil, fmt.Errorf("redis: function list unexpected key %s", key) + } + } + + functions[i] = function + } + return functions, nil +} + +// FunctionStats contains information about the scripts currently executing on the server, and the available engines +// - Engines: +// Statistics about the engine like number of functions and number of libraries +// - RunningScript: +// The script currently running on the shard we're connecting to. +// For Redis Enterprise and Redis Cloud, this represents the +// function with the longest running time, across all the running functions, on all shards +// - RunningScripts +// All scripts currently running in a Redis Enterprise clustered database. +// Only available on Redis Enterprise +type FunctionStats struct { + Engines []Engine + isRunning bool + rs RunningScript + allrs []RunningScript +} + +func (fs *FunctionStats) Running() bool { + return fs.isRunning +} + +func (fs *FunctionStats) RunningScript() (RunningScript, bool) { + return fs.rs, fs.isRunning +} + +// AllRunningScripts returns all scripts currently running in a Redis Enterprise clustered database. +// Only available on Redis Enterprise +func (fs *FunctionStats) AllRunningScripts() []RunningScript { + return fs.allrs +} + +type RunningScript struct { + Name string + Command []string + Duration time.Duration +} + +type Engine struct { + Language string + LibrariesCount int64 + FunctionsCount int64 +} + +type FunctionStatsCmd struct { + baseCmd + val FunctionStats +} + +var _ Cmder = (*FunctionStatsCmd)(nil) + +func NewFunctionStatsCmd(ctx context.Context, args ...interface{}) *FunctionStatsCmd { + return &FunctionStatsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FunctionStatsCmd) SetVal(val FunctionStats) { + cmd.val = val +} + +func (cmd *FunctionStatsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FunctionStatsCmd) Val() FunctionStats { + return cmd.val +} + +func (cmd *FunctionStatsCmd) Result() (FunctionStats, error) { + return cmd.val, cmd.err +} + +func (cmd *FunctionStatsCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + var key string + var result FunctionStats + for f := 0; f < n; f++ { + key, err = rd.ReadString() + if err != nil { + return err + } + + switch key { + case "running_script": + result.rs, result.isRunning, err = cmd.readRunningScript(rd) + case "engines": + result.Engines, err = cmd.readEngines(rd) + case "all_running_scripts": // Redis Enterprise only + result.allrs, result.isRunning, err = cmd.readRunningScripts(rd) + default: + return fmt.Errorf("redis: function stats unexpected key %s", key) + } + + if err != nil { + return err + } + } + + cmd.val = result + return nil +} + +func (cmd *FunctionStatsCmd) readRunningScript(rd *proto.Reader) (RunningScript, bool, error) { + err := rd.ReadFixedMapLen(3) + if err != nil { + if err == Nil { + return RunningScript{}, false, nil + } + return 
RunningScript{}, false, err + } + + var runningScript RunningScript + for i := 0; i < 3; i++ { + key, err := rd.ReadString() + if err != nil { + return RunningScript{}, false, err + } + + switch key { + case "name": + runningScript.Name, err = rd.ReadString() + case "duration_ms": + runningScript.Duration, err = cmd.readDuration(rd) + case "command": + runningScript.Command, err = cmd.readCommand(rd) + default: + return RunningScript{}, false, fmt.Errorf("redis: function stats unexpected running_script key %s", key) + } + + if err != nil { + return RunningScript{}, false, err + } + } + + return runningScript, true, nil +} + +func (cmd *FunctionStatsCmd) readEngines(rd *proto.Reader) ([]Engine, error) { + n, err := rd.ReadMapLen() + if err != nil { + return nil, err + } + + engines := make([]Engine, 0, n) + for i := 0; i < n; i++ { + engine := Engine{} + engine.Language, err = rd.ReadString() + if err != nil { + return nil, err + } + + err = rd.ReadFixedMapLen(2) + if err != nil { + return nil, fmt.Errorf("redis: function stats unexpected %s engine map length", engine.Language) + } + + for i := 0; i < 2; i++ { + key, err := rd.ReadString() + switch key { + case "libraries_count": + engine.LibrariesCount, err = rd.ReadInt() + case "functions_count": + engine.FunctionsCount, err = rd.ReadInt() + } + if err != nil { + return nil, err + } + } + + engines = append(engines, engine) + } + return engines, nil +} + +func (cmd *FunctionStatsCmd) readDuration(rd *proto.Reader) (time.Duration, error) { + t, err := rd.ReadInt() + if err != nil { + return time.Duration(0), err + } + return time.Duration(t) * time.Millisecond, nil +} + +func (cmd *FunctionStatsCmd) readCommand(rd *proto.Reader) ([]string, error) { + + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + command := make([]string, 0, n) + for i := 0; i < n; i++ { + x, err := rd.ReadString() + if err != nil { + return nil, err + } + command = append(command, x) + } + + return command, nil +} +func (cmd *FunctionStatsCmd) readRunningScripts(rd *proto.Reader) ([]RunningScript, bool, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, false, err + } + + runningScripts := make([]RunningScript, 0, n) + for i := 0; i < n; i++ { + rs, _, err := cmd.readRunningScript(rd) + if err != nil { + return nil, false, err + } + runningScripts = append(runningScripts, rs) + } + + return runningScripts, len(runningScripts) > 0, nil +} + +//------------------------------------------------------------------------------ + +// LCSQuery is a parameter used for the LCS command +type LCSQuery struct { + Key1 string + Key2 string + Len bool + Idx bool + MinMatchLen int + WithMatchLen bool +} + +// LCSMatch is the result set of the LCS command. 
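+//
+// A minimal usage sketch (illustrative only; it assumes a context.Context ctx and
+// an initialized *Client rdb):
+//
+//	res, err := rdb.LCS(ctx, &LCSQuery{Key1: "key1", Key2: "key2", Idx: true, WithMatchLen: true}).Result()
+//
+// With Idx set, res.Matches holds the matched positions in both keys and, because
+// WithMatchLen is set, each position also carries MatchLen.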
+type LCSMatch struct { + MatchString string + Matches []LCSMatchedPosition + Len int64 +} + +type LCSMatchedPosition struct { + Key1 LCSPosition + Key2 LCSPosition + + // only for withMatchLen is true + MatchLen int64 +} + +type LCSPosition struct { + Start int64 + End int64 +} + +type LCSCmd struct { + baseCmd + + // 1: match string + // 2: match len + // 3: match idx LCSMatch + readType uint8 + val *LCSMatch +} + +func NewLCSCmd(ctx context.Context, q *LCSQuery) *LCSCmd { + args := make([]interface{}, 3, 7) + args[0] = "lcs" + args[1] = q.Key1 + args[2] = q.Key2 + + cmd := &LCSCmd{readType: 1} + if q.Len { + cmd.readType = 2 + args = append(args, "len") + } else if q.Idx { + cmd.readType = 3 + args = append(args, "idx") + if q.MinMatchLen != 0 { + args = append(args, "minmatchlen", q.MinMatchLen) + } + if q.WithMatchLen { + args = append(args, "withmatchlen") + } + } + cmd.baseCmd = baseCmd{ + ctx: ctx, + args: args, + } + + return cmd +} + +func (cmd *LCSCmd) SetVal(val *LCSMatch) { + cmd.val = val +} + +func (cmd *LCSCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *LCSCmd) Val() *LCSMatch { + return cmd.val +} + +func (cmd *LCSCmd) Result() (*LCSMatch, error) { + return cmd.val, cmd.err +} + +func (cmd *LCSCmd) readReply(rd *proto.Reader) (err error) { + lcs := &LCSMatch{} + switch cmd.readType { + case 1: + // match string + if lcs.MatchString, err = rd.ReadString(); err != nil { + return err + } + case 2: + // match len + if lcs.Len, err = rd.ReadInt(); err != nil { + return err + } + case 3: + // read LCSMatch + if err = rd.ReadFixedMapLen(2); err != nil { + return err + } + + // read matches or len field + for i := 0; i < 2; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "matches": + // read array of matched positions + if lcs.Matches, err = cmd.readMatchedPositions(rd); err != nil { + return err + } + case "len": + // read match length + if lcs.Len, err = rd.ReadInt(); err != nil { + return err + } + } + } + } + + cmd.val = lcs + return nil +} + +func (cmd *LCSCmd) readMatchedPositions(rd *proto.Reader) ([]LCSMatchedPosition, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + positions := make([]LCSMatchedPosition, n) + for i := 0; i < n; i++ { + pn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + if positions[i].Key1, err = cmd.readPosition(rd); err != nil { + return nil, err + } + if positions[i].Key2, err = cmd.readPosition(rd); err != nil { + return nil, err + } + + // read match length if WithMatchLen is true + if pn > 2 { + if positions[i].MatchLen, err = rd.ReadInt(); err != nil { + return nil, err + } + } + } + + return positions, nil +} + +func (cmd *LCSCmd) readPosition(rd *proto.Reader) (pos LCSPosition, err error) { + if err = rd.ReadFixedArrayLen(2); err != nil { + return pos, err + } + if pos.Start, err = rd.ReadInt(); err != nil { + return pos, err + } + if pos.End, err = rd.ReadInt(); err != nil { + return pos, err + } + + return pos, nil +} + +// ------------------------------------------------------------------------ + +type KeyFlags struct { + Key string + Flags []string +} + +type KeyFlagsCmd struct { + baseCmd + + val []KeyFlags +} + +var _ Cmder = (*KeyFlagsCmd)(nil) + +func NewKeyFlagsCmd(ctx context.Context, args ...interface{}) *KeyFlagsCmd { + return &KeyFlagsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *KeyFlagsCmd) SetVal(val []KeyFlags) { + cmd.val = val +} + +func (cmd *KeyFlagsCmd) Val() 
[]KeyFlags { + return cmd.val +} + +func (cmd *KeyFlagsCmd) Result() ([]KeyFlags, error) { + return cmd.val, cmd.err +} + +func (cmd *KeyFlagsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *KeyFlagsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + if n == 0 { + cmd.val = make([]KeyFlags, 0) + return nil + } + + cmd.val = make([]KeyFlags, n) + + for i := 0; i < len(cmd.val); i++ { + + if err = rd.ReadFixedArrayLen(2); err != nil { + return err + } + + if cmd.val[i].Key, err = rd.ReadString(); err != nil { + return err + } + flagsLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[i].Flags = make([]string, flagsLen) + + for j := 0; j < flagsLen; j++ { + if cmd.val[i].Flags[j], err = rd.ReadString(); err != nil { + return err + } + } + } + + return nil +} + +// --------------------------------------------------------------------------------------------------- + +type ClusterLink struct { + Direction string + Node string + CreateTime int64 + Events string + SendBufferAllocated int64 + SendBufferUsed int64 +} + +type ClusterLinksCmd struct { + baseCmd + + val []ClusterLink +} + +var _ Cmder = (*ClusterLinksCmd)(nil) + +func NewClusterLinksCmd(ctx context.Context, args ...interface{}) *ClusterLinksCmd { + return &ClusterLinksCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterLinksCmd) SetVal(val []ClusterLink) { + cmd.val = val +} + +func (cmd *ClusterLinksCmd) Val() []ClusterLink { + return cmd.val +} + +func (cmd *ClusterLinksCmd) Result() ([]ClusterLink, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterLinksCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterLinksCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterLink, n) + + for i := 0; i < len(cmd.val); i++ { + m, err := rd.ReadMapLen() + if err != nil { + return err + } + + for j := 0; j < m; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "direction": + cmd.val[i].Direction, err = rd.ReadString() + case "node": + cmd.val[i].Node, err = rd.ReadString() + case "create-time": + cmd.val[i].CreateTime, err = rd.ReadInt() + case "events": + cmd.val[i].Events, err = rd.ReadString() + case "send-buffer-allocated": + cmd.val[i].SendBufferAllocated, err = rd.ReadInt() + case "send-buffer-used": + cmd.val[i].SendBufferUsed, err = rd.ReadInt() + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER LINKS reply", key) + } + + if err != nil { + return err + } + } + } + + return nil +} + +// ------------------------------------------------------------------------------------------------------------------ + +type SlotRange struct { + Start int64 + End int64 +} + +type Node struct { + ID string + Endpoint string + IP string + Hostname string + Port int64 + TLSPort int64 + Role string + ReplicationOffset int64 + Health string +} + +type ClusterShard struct { + Slots []SlotRange + Nodes []Node +} + +type ClusterShardsCmd struct { + baseCmd + + val []ClusterShard +} + +var _ Cmder = (*ClusterShardsCmd)(nil) + +func NewClusterShardsCmd(ctx context.Context, args ...interface{}) *ClusterShardsCmd { + return &ClusterShardsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterShardsCmd) SetVal(val []ClusterShard) { + cmd.val = val +} + +func (cmd *ClusterShardsCmd) Val() []ClusterShard { + return cmd.val +} + +func 
(cmd *ClusterShardsCmd) Result() ([]ClusterShard, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterShardsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterShardsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val = make([]ClusterShard, n) + + for i := 0; i < n; i++ { + m, err := rd.ReadMapLen() + if err != nil { + return err + } + + for j := 0; j < m; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "slots": + l, err := rd.ReadArrayLen() + if err != nil { + return err + } + for k := 0; k < l; k += 2 { + start, err := rd.ReadInt() + if err != nil { + return err + } + + end, err := rd.ReadInt() + if err != nil { + return err + } + + cmd.val[i].Slots = append(cmd.val[i].Slots, SlotRange{Start: start, End: end}) + } + case "nodes": + nodesLen, err := rd.ReadArrayLen() + if err != nil { + return err + } + cmd.val[i].Nodes = make([]Node, nodesLen) + for k := 0; k < nodesLen; k++ { + nodeMapLen, err := rd.ReadMapLen() + if err != nil { + return err + } + + for l := 0; l < nodeMapLen; l++ { + nodeKey, err := rd.ReadString() + if err != nil { + return err + } + + switch nodeKey { + case "id": + cmd.val[i].Nodes[k].ID, err = rd.ReadString() + case "endpoint": + cmd.val[i].Nodes[k].Endpoint, err = rd.ReadString() + case "ip": + cmd.val[i].Nodes[k].IP, err = rd.ReadString() + case "hostname": + cmd.val[i].Nodes[k].Hostname, err = rd.ReadString() + case "port": + cmd.val[i].Nodes[k].Port, err = rd.ReadInt() + case "tls-port": + cmd.val[i].Nodes[k].TLSPort, err = rd.ReadInt() + case "role": + cmd.val[i].Nodes[k].Role, err = rd.ReadString() + case "replication-offset": + cmd.val[i].Nodes[k].ReplicationOffset, err = rd.ReadInt() + case "health": + cmd.val[i].Nodes[k].Health, err = rd.ReadString() + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS node reply", nodeKey) + } + + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("redis: unexpected key %q in CLUSTER SHARDS reply", key) + } + } + } + + return nil +} + +// ----------------------------------------- + +type RankScore struct { + Rank int64 + Score float64 +} + +type RankWithScoreCmd struct { + baseCmd + + val RankScore +} + +var _ Cmder = (*RankWithScoreCmd)(nil) + +func NewRankWithScoreCmd(ctx context.Context, args ...interface{}) *RankWithScoreCmd { + return &RankWithScoreCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *RankWithScoreCmd) SetVal(val RankScore) { + cmd.val = val +} + +func (cmd *RankWithScoreCmd) Val() RankScore { + return cmd.val +} + +func (cmd *RankWithScoreCmd) Result() (RankScore, error) { + return cmd.val, cmd.err +} + +func (cmd *RankWithScoreCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *RankWithScoreCmd) readReply(rd *proto.Reader) error { + if err := rd.ReadFixedArrayLen(2); err != nil { + return err + } + + rank, err := rd.ReadInt() + if err != nil { + return err + } + + score, err := rd.ReadFloat() + if err != nil { + return err + } + + cmd.val = RankScore{Rank: rank, Score: score} + + return nil +} + +// -------------------------------------------------------------------------------------------------- + +// ClientFlags is redis-server client flags, copy from redis/src/server.h (redis 7.0) +type ClientFlags uint64 + +const ( + ClientSlave ClientFlags = 1 << 0 /* This client is a replica */ + ClientMaster ClientFlags = 1 << 1 /* This client is a master */ + ClientMonitor 
ClientFlags = 1 << 2 /* This client is a slave monitor, see MONITOR */ + ClientMulti ClientFlags = 1 << 3 /* This client is in a MULTI context */ + ClientBlocked ClientFlags = 1 << 4 /* The client is waiting in a blocking operation */ + ClientDirtyCAS ClientFlags = 1 << 5 /* Watched keys modified. EXEC will fail. */ + ClientCloseAfterReply ClientFlags = 1 << 6 /* Close after writing entire reply. */ + ClientUnBlocked ClientFlags = 1 << 7 /* This client was unblocked and is stored in server.unblocked_clients */ + ClientScript ClientFlags = 1 << 8 /* This is a non-connected client used by Lua */ + ClientAsking ClientFlags = 1 << 9 /* Client issued the ASKING command */ + ClientCloseASAP ClientFlags = 1 << 10 /* Close this client ASAP */ + ClientUnixSocket ClientFlags = 1 << 11 /* Client connected via Unix domain socket */ + ClientDirtyExec ClientFlags = 1 << 12 /* EXEC will fail for errors while queueing */ + ClientMasterForceReply ClientFlags = 1 << 13 /* Queue replies even if is master */ + ClientForceAOF ClientFlags = 1 << 14 /* Force AOF propagation of current cmd. */ + ClientForceRepl ClientFlags = 1 << 15 /* Force replication of current cmd. */ + ClientPrePSync ClientFlags = 1 << 16 /* Instance don't understand PSYNC. */ + ClientReadOnly ClientFlags = 1 << 17 /* Cluster client is in read-only state. */ + ClientPubSub ClientFlags = 1 << 18 /* Client is in Pub/Sub mode. */ + ClientPreventAOFProp ClientFlags = 1 << 19 /* Don't propagate to AOF. */ + ClientPreventReplProp ClientFlags = 1 << 20 /* Don't propagate to slaves. */ + ClientPreventProp ClientFlags = ClientPreventAOFProp | ClientPreventReplProp + ClientPendingWrite ClientFlags = 1 << 21 /* Client has output to send but a-write handler is yet not installed. */ + ClientReplyOff ClientFlags = 1 << 22 /* Don't send replies to client. */ + ClientReplySkipNext ClientFlags = 1 << 23 /* Set ClientREPLY_SKIP for next cmd */ + ClientReplySkip ClientFlags = 1 << 24 /* Don't send just this reply. */ + ClientLuaDebug ClientFlags = 1 << 25 /* Run EVAL in debug mode. */ + ClientLuaDebugSync ClientFlags = 1 << 26 /* EVAL debugging without fork() */ + ClientModule ClientFlags = 1 << 27 /* Non connected client used by some module. */ + ClientProtected ClientFlags = 1 << 28 /* Client should not be freed for now. */ + ClientExecutingCommand ClientFlags = 1 << 29 /* Indicates that the client is currently in the process of handling + a command. usually this will be marked only during call() + however, blocked clients might have this flag kept until they + will try to reprocess the command. */ + ClientPendingCommand ClientFlags = 1 << 30 /* Indicates the client has a fully * parsed command ready for execution. */ + ClientTracking ClientFlags = 1 << 31 /* Client enabled keys tracking in order to perform client side caching. */ + ClientTrackingBrokenRedir ClientFlags = 1 << 32 /* Target client is invalid. */ + ClientTrackingBCAST ClientFlags = 1 << 33 /* Tracking in BCAST mode. */ + ClientTrackingOptIn ClientFlags = 1 << 34 /* Tracking in opt-in mode. */ + ClientTrackingOptOut ClientFlags = 1 << 35 /* Tracking in opt-out mode. */ + ClientTrackingCaching ClientFlags = 1 << 36 /* CACHING yes/no was given, depending on optin/optout mode. */ + ClientTrackingNoLoop ClientFlags = 1 << 37 /* Don't send invalidation messages about writes performed by myself.*/ + ClientInTimeoutTable ClientFlags = 1 << 38 /* This client is in the timeout table. */ + ClientProtocolError ClientFlags = 1 << 39 /* Protocol error chatting with it. 
*/ + ClientCloseAfterCommand ClientFlags = 1 << 40 /* Close after executing commands * and writing entire reply. */ + ClientDenyBlocking ClientFlags = 1 << 41 /* Indicate that the client should not be blocked. currently, turned on inside MULTI, Lua, RM_Call, and AOF client */ + ClientReplRDBOnly ClientFlags = 1 << 42 /* This client is a replica that only wants RDB without replication buffer. */ + ClientNoEvict ClientFlags = 1 << 43 /* This client is protected against client memory eviction. */ + ClientAllowOOM ClientFlags = 1 << 44 /* Client used by RM_Call is allowed to fully execute scripts even when in OOM */ + ClientNoTouch ClientFlags = 1 << 45 /* This client will not touch LFU/LRU stats. */ + ClientPushing ClientFlags = 1 << 46 /* This client is pushing notifications. */ +) + +// ClientInfo is redis-server ClientInfo, not go-redis *Client +type ClientInfo struct { + ID int64 // redis version 2.8.12, a unique 64-bit client ID + Addr string // address/port of the client + LAddr string // address/port of local address client connected to (bind address) + FD int64 // file descriptor corresponding to the socket + Name string // the name set by the client with CLIENT SETNAME + Age time.Duration // total duration of the connection in seconds + Idle time.Duration // idle time of the connection in seconds + Flags ClientFlags // client flags (see below) + DB int // current database ID + Sub int // number of channel subscriptions + PSub int // number of pattern matching subscriptions + SSub int // redis version 7.0.3, number of shard channel subscriptions + Multi int // number of commands in a MULTI/EXEC context + QueryBuf int // qbuf, query buffer length (0 means no query pending) + QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) + ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) + MultiMem int // redis version 7.0, memory is used up by buffered multi commands + BufferSize int // rbs, usable size of buffer + BufferPeak int // rbp, peak used size of buffer in last 5 sec interval + OutputBufferLength int // obl, output buffer length + OutputListLength int // oll, output list length (replies are queued in this list when the buffer is full) + OutputMemory int // omem, output buffer memory usage + TotalMemory int // tot-mem, total memory consumed by this client in its various buffers + Events string // file descriptor events (see below) + LastCmd string // cmd, last command played + User string // the authenticated username of the client + Redir int64 // client id of current client tracking redirection + Resp int // redis version 7.0, client RESP protocol version + LibName string // redis version 7.2, client library name + LibVer string // redis version 7.2, client library version +} + +type ClientInfoCmd struct { + baseCmd + + val *ClientInfo +} + +var _ Cmder = (*ClientInfoCmd)(nil) + +func NewClientInfoCmd(ctx context.Context, args ...interface{}) *ClientInfoCmd { + return &ClientInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClientInfoCmd) SetVal(val *ClientInfo) { + cmd.val = val +} + +func (cmd *ClientInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClientInfoCmd) Val() *ClientInfo { + return cmd.val +} + +func (cmd *ClientInfoCmd) Result() (*ClientInfo, error) { + return cmd.val, cmd.err +} + +func (cmd *ClientInfoCmd) readReply(rd *proto.Reader) (err error) { + txt, err := rd.ReadString() + if err != nil { + return err + } + + // sds o = 
catClientInfoString(sdsempty(), c); + // o = sdscatlen(o,"\n",1); + // addReplyVerbatim(c,o,sdslen(o),"txt"); + // sdsfree(o); + cmd.val, err = parseClientInfo(strings.TrimSpace(txt)) + return err +} + +// fmt.Sscanf() cannot handle null values +func parseClientInfo(txt string) (info *ClientInfo, err error) { + info = &ClientInfo{} + for _, s := range strings.Split(txt, " ") { + kv := strings.Split(s, "=") + if len(kv) != 2 { + return nil, fmt.Errorf("redis: unexpected client info data (%s)", s) + } + key, val := kv[0], kv[1] + + switch key { + case "id": + info.ID, err = strconv.ParseInt(val, 10, 64) + case "addr": + info.Addr = val + case "laddr": + info.LAddr = val + case "fd": + info.FD, err = strconv.ParseInt(val, 10, 64) + case "name": + info.Name = val + case "age": + var age int + if age, err = strconv.Atoi(val); err == nil { + info.Age = time.Duration(age) * time.Second + } + case "idle": + var idle int + if idle, err = strconv.Atoi(val); err == nil { + info.Idle = time.Duration(idle) * time.Second + } + case "flags": + if val == "N" { + break + } + + for i := 0; i < len(val); i++ { + switch val[i] { + case 'S': + info.Flags |= ClientSlave + case 'O': + info.Flags |= ClientSlave | ClientMonitor + case 'M': + info.Flags |= ClientMaster + case 'P': + info.Flags |= ClientPubSub + case 'x': + info.Flags |= ClientMulti + case 'b': + info.Flags |= ClientBlocked + case 't': + info.Flags |= ClientTracking + case 'R': + info.Flags |= ClientTrackingBrokenRedir + case 'B': + info.Flags |= ClientTrackingBCAST + case 'd': + info.Flags |= ClientDirtyCAS + case 'c': + info.Flags |= ClientCloseAfterCommand + case 'u': + info.Flags |= ClientUnBlocked + case 'A': + info.Flags |= ClientCloseASAP + case 'U': + info.Flags |= ClientUnixSocket + case 'r': + info.Flags |= ClientReadOnly + case 'e': + info.Flags |= ClientNoEvict + case 'T': + info.Flags |= ClientNoTouch + default: + return nil, fmt.Errorf("redis: unexpected client info flags(%s)", string(val[i])) + } + } + case "db": + info.DB, err = strconv.Atoi(val) + case "sub": + info.Sub, err = strconv.Atoi(val) + case "psub": + info.PSub, err = strconv.Atoi(val) + case "ssub": + info.SSub, err = strconv.Atoi(val) + case "multi": + info.Multi, err = strconv.Atoi(val) + case "qbuf": + info.QueryBuf, err = strconv.Atoi(val) + case "qbuf-free": + info.QueryBufFree, err = strconv.Atoi(val) + case "argv-mem": + info.ArgvMem, err = strconv.Atoi(val) + case "multi-mem": + info.MultiMem, err = strconv.Atoi(val) + case "rbs": + info.BufferSize, err = strconv.Atoi(val) + case "rbp": + info.BufferPeak, err = strconv.Atoi(val) + case "obl": + info.OutputBufferLength, err = strconv.Atoi(val) + case "oll": + info.OutputListLength, err = strconv.Atoi(val) + case "omem": + info.OutputMemory, err = strconv.Atoi(val) + case "tot-mem": + info.TotalMemory, err = strconv.Atoi(val) + case "events": + info.Events = val + case "cmd": + info.LastCmd = val + case "user": + info.User = val + case "redir": + info.Redir, err = strconv.ParseInt(val, 10, 64) + case "resp": + info.Resp, err = strconv.Atoi(val) + case "lib-name": + info.LibName = val + case "lib-ver": + info.LibVer = val + default: + return nil, fmt.Errorf("redis: unexpected client info key(%s)", key) + } + + if err != nil { + return nil, err + } + } + + return info, nil +} + +// ------------------------------------------- + +type ACLLogEntry struct { + Count int64 + Reason string + Context string + Object string + Username string + AgeSeconds float64 + ClientInfo *ClientInfo + EntryID int64 + TimestampCreated int64 
+ TimestampLastUpdated int64 +} + +type ACLLogCmd struct { + baseCmd + + val []*ACLLogEntry +} + +var _ Cmder = (*ACLLogCmd)(nil) + +func NewACLLogCmd(ctx context.Context, args ...interface{}) *ACLLogCmd { + return &ACLLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ACLLogCmd) SetVal(val []*ACLLogEntry) { + cmd.val = val +} + +func (cmd *ACLLogCmd) Val() []*ACLLogEntry { + return cmd.val +} + +func (cmd *ACLLogCmd) Result() ([]*ACLLogEntry, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ACLLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ACLLogCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]*ACLLogEntry, n) + for i := 0; i < n; i++ { + cmd.val[i] = &ACLLogEntry{} + entry := cmd.val[i] + respLen, err := rd.ReadMapLen() + if err != nil { + return err + } + for j := 0; j < respLen; j++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "count": + entry.Count, err = rd.ReadInt() + case "reason": + entry.Reason, err = rd.ReadString() + case "context": + entry.Context, err = rd.ReadString() + case "object": + entry.Object, err = rd.ReadString() + case "username": + entry.Username, err = rd.ReadString() + case "age-seconds": + entry.AgeSeconds, err = rd.ReadFloat() + case "client-info": + txt, err := rd.ReadString() + if err != nil { + return err + } + entry.ClientInfo, err = parseClientInfo(strings.TrimSpace(txt)) + if err != nil { + return err + } + case "entry-id": + entry.EntryID, err = rd.ReadInt() + case "timestamp-created": + entry.TimestampCreated, err = rd.ReadInt() + case "timestamp-last-updated": + entry.TimestampLastUpdated, err = rd.ReadInt() + default: + return fmt.Errorf("redis: unexpected key %q in ACL LOG reply", key) + } + + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go similarity index 79% rename from vendor/github.com/go-redis/redis/v8/commands.go rename to vendor/github.com/redis/go-redis/v9/commands.go index bbfe089df1..34f4d2c227 100644 --- a/vendor/github.com/go-redis/redis/v8/commands.go +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -2,18 +2,22 @@ package redis import ( "context" + "encoding" "errors" "io" + "net" + "reflect" + "strings" "time" - "github.com/go-redis/redis/v8/internal" + "github.com/redis/go-redis/v9/internal" ) // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, // otherwise you will receive an error: (error) ERR syntax error. // For example: // -// rdb.Set(ctx, key, value, redis.KeepTTL) +// rdb.Set(ctx, key, value, redis.KeepTTL) const KeepTTL = -1 func usePrecise(dur time.Duration) bool { @@ -73,11 +77,84 @@ func appendArg(dst []interface{}, arg interface{}) []interface{} { dst = append(dst, k, v) } return dst + case time.Time, time.Duration, encoding.BinaryMarshaler, net.IP: + return append(dst, arg) default: + // scan struct field + v := reflect.ValueOf(arg) + if v.Type().Kind() == reflect.Ptr { + if v.IsNil() { + // error: arg is not a valid object + return dst + } + v = v.Elem() + } + + if v.Type().Kind() == reflect.Struct { + return appendStructField(dst, v) + } + return append(dst, arg) } } +// appendStructField appends the field and value held by the structure v to dst, and returns the appended dst. 
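+//
+// As an illustration (the item struct below is hypothetical and not part of this
+// file):
+//
+//	type item struct {
+//		Name  string `redis:"name"`
+//		Price int    `redis:"price,omitempty"`
+//	}
+//
+// A value of this type is flattened to "name", <Name> and, unless Price is zero,
+// "price", <Price>; fields whose tag is empty or "-" are skipped entirely.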
+func appendStructField(dst []interface{}, v reflect.Value) []interface{} { + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + tag := typ.Field(i).Tag.Get("redis") + if tag == "" || tag == "-" { + continue + } + name, opt, _ := strings.Cut(tag, ",") + if name == "" { + continue + } + + field := v.Field(i) + + // miss field + if omitEmpty(opt) && isEmptyValue(field) { + continue + } + + if field.CanInterface() { + dst = append(dst, name, field.Interface()) + } + } + + return dst +} + +func omitEmpty(opt string) bool { + for opt != "" { + var name string + name, opt, _ = strings.Cut(opt, ",") + if name == "omitempty" { + return true + } + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + } + return false +} + type Cmdable interface { Pipeline() Pipeliner Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) @@ -86,6 +163,9 @@ type Cmdable interface { TxPipeline() Pipeliner Command(ctx context.Context) *CommandsInfoCmd + CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd + CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd + CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd ClientGetName(ctx context.Context) *StringCmd Echo(ctx context.Context, message interface{}) *StringCmd Ping(ctx context.Context) *StatusCmd @@ -96,6 +176,7 @@ type Cmdable interface { Exists(ctx context.Context, keys ...string) *IntCmd Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireTime(ctx context.Context, key string) *DurationCmd ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd @@ -109,6 +190,7 @@ type Cmdable interface { Persist(ctx context.Context, key string) *BoolCmd PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PExpireTime(ctx context.Context, key string) *DurationCmd PTTL(ctx context.Context, key string) *DurationCmd RandomKey(ctx context.Context) *StringCmd Rename(ctx context.Context, key, newkey string) *StatusCmd @@ -116,6 +198,7 @@ type Cmdable interface { Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd Touch(ctx context.Context, keys ...string) *IntCmd @@ -137,8 +220,7 @@ type Cmdable interface { MSetNX(ctx context.Context, values ...interface{}) *BoolCmd Set(ctx context.Context, key string, value interface{}, expiration 
time.Duration) *StatusCmd SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd - // TODO: rename to SetEx - SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd @@ -153,6 +235,7 @@ type Cmdable interface { BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd BitOpNot(ctx context.Context, destKey string, key string) *IntCmd BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd @@ -164,7 +247,7 @@ type Cmdable interface { HDel(ctx context.Context, key string, fields ...string) *IntCmd HExists(ctx context.Context, key, field string) *BoolCmd HGet(ctx context.Context, key, field string) *StringCmd - HGetAll(ctx context.Context, key string) *StringStringMapCmd + HGetAll(ctx context.Context, key string) *MapStringStringCmd HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd HKeys(ctx context.Context, key string) *StringSliceCmd @@ -174,16 +257,20 @@ type Cmdable interface { HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd HVals(ctx context.Context, key string) *StringSliceCmd - HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd + HRandField(ctx context.Context, key string, count int) *StringSliceCmd + HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LCS(ctx context.Context, q *LCSQuery) *LCSCmd LIndex(ctx context.Context, key string, index int64) *StringCmd LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd LLen(ctx context.Context, key string) *IntCmd + LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd LPop(ctx context.Context, key string) *StringCmd LPopCount(ctx context.Context, key string, count int) *StringSliceCmd LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd @@ -207,6 +294,7 @@ type Cmdable interface { SDiff(ctx context.Context, keys ...string) *StringSliceCmd SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd SInterStore(ctx context.Context, destination 
string, keys ...string) *IntCmd SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd @@ -244,10 +332,6 @@ type Cmdable interface { XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd - - // TODO: XTrim and XTrimApprox remove in v9. - XTrim(ctx context.Context, key string, maxLen int64) *IntCmd - XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd XTrimMinID(ctx context.Context, key string, minID string) *IntCmd @@ -259,35 +343,24 @@ type Cmdable interface { BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd - // TODO: remove - // ZAddCh - // ZIncr - // ZAddNXCh - // ZAddXXCh - // ZIncrNX - // ZIncrXX - // in v9. - // use ZAddArgs and ZAddArgsIncr. - - ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd - ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAdd(ctx context.Context, key string, members ...Z) *IntCmd + ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd + ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd + ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd + ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd - ZIncr(ctx context.Context, key string, member *Z) *FloatCmd - ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd - ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd ZCard(ctx context.Context, key string) *IntCmd ZCount(ctx context.Context, key, min, max string) *IntCmd ZLexCount(ctx context.Context, key, min, max string) *IntCmd ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd ZInter(ctx context.Context, store *ZStore) *StringSliceCmd ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd + ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd @@ -300,6 +373,7 @@ type Cmdable interface { ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd ZRank(ctx context.Context, key, member string) *IntCmd + ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd 
ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd @@ -310,11 +384,13 @@ type Cmdable interface { ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd ZRevRank(ctx context.Context, key, member string) *IntCmd + ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd ZScore(ctx context.Context, key, member string) *FloatCmd ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd + ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd + ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd ZUnion(ctx context.Context, store ZStore) *StringSliceCmd ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd - ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd ZDiff(ctx context.Context, keys ...string) *StringSliceCmd ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd @@ -328,9 +404,13 @@ type Cmdable interface { ClientKill(ctx context.Context, ipPort string) *StatusCmd ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd ClientList(ctx context.Context) *StringCmd + ClientInfo(ctx context.Context) *ClientInfoCmd ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientUnpause(ctx context.Context) *BoolCmd ClientID(ctx context.Context) *IntCmd - ConfigGet(ctx context.Context, parameter string) *SliceCmd + ClientUnblock(ctx context.Context, id int64) *IntCmd + ClientUnblockWithError(ctx context.Context, id int64) *IntCmd + ConfigGet(ctx context.Context, parameter string) *MapStringStringCmd ConfigResetStat(ctx context.Context) *StatusCmd ConfigSet(ctx context.Context, parameter, value string) *StatusCmd ConfigRewrite(ctx context.Context) *StatusCmd @@ -346,6 +426,7 @@ type Cmdable interface { ShutdownSave(ctx context.Context) *StatusCmd ShutdownNoSave(ctx context.Context) *StatusCmd SlaveOf(ctx context.Context, host, port string) *StatusCmd + SlowLogGet(ctx context.Context, num int64) *SlowLogCmd Time(ctx context.Context) *TimeCmd DebugObject(ctx context.Context, key string) *StringCmd ReadOnly(ctx context.Context) *StatusCmd @@ -354,17 +435,39 @@ type Cmdable interface { Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + EvalRO(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalShaRO(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd ScriptFlush(ctx context.Context) *StatusCmd ScriptKill(ctx context.Context) *StatusCmd ScriptLoad(ctx context.Context, script string) *StringCmd + FunctionLoad(ctx context.Context, code string) *StringCmd + FunctionLoadReplace(ctx context.Context, code string) *StringCmd + FunctionDelete(ctx context.Context, libName string) *StringCmd + FunctionFlush(ctx context.Context) *StringCmd + FunctionKill(ctx context.Context) *StringCmd + FunctionFlushAsync(ctx context.Context) *StringCmd + FunctionList(ctx context.Context, q FunctionListQuery) *FunctionListCmd + FunctionDump(ctx context.Context) *StringCmd + FunctionRestore(ctx context.Context, libDump string) *StringCmd + FunctionStats(ctx context.Context) 
*FunctionStatsCmd + FCall(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + FCallRo(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + FCallRO(ctx context.Context, function string, keys []string, args ...interface{}) *Cmd + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + SPublish(ctx context.Context, channel string, message interface{}) *IntCmd PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd - PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd + PubSubNumSub(ctx context.Context, channels ...string) *MapStringIntCmd PubSubNumPat(ctx context.Context) *IntCmd + PubSubShardChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubShardNumSub(ctx context.Context, channels ...string) *MapStringIntCmd + ClusterMyShardID(ctx context.Context) *StringCmd ClusterSlots(ctx context.Context) *ClusterSlotsCmd + ClusterShards(ctx context.Context) *ClusterShardsCmd + ClusterLinks(ctx context.Context) *ClusterLinksCmd ClusterNodes(ctx context.Context) *StringCmd ClusterMeet(ctx context.Context, host, port string) *StatusCmd ClusterForget(ctx context.Context, nodeID string) *StatusCmd @@ -395,6 +498,12 @@ type Cmdable interface { GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd + + ACLDryRun(ctx context.Context, username string, command ...interface{}) *StringCmd + ACLLog(ctx context.Context, count int64) *ACLLogCmd + ACLLogReset(ctx context.Context) *StatusCmd + + ModuleLoadex(ctx context.Context, conf *ModuleLoadexConfig) *StringCmd } type StatefulCmdable interface { @@ -404,6 +513,7 @@ type StatefulCmdable interface { Select(ctx context.Context, index int) *StatusCmd SwapDB(ctx context.Context, index1, index2 int) *StatusCmd ClientSetName(ctx context.Context, name string) *BoolCmd + Hello(ctx context.Context, ver int, username, password, clientName string) *MapStringInterfaceCmd } var ( @@ -460,6 +570,26 @@ func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCm return cmd } +// Hello Set the resp protocol used. +func (c statefulCmdable) Hello(ctx context.Context, + ver int, username, password, clientName string) *MapStringInterfaceCmd { + args := make([]interface{}, 0, 7) + args = append(args, "hello", ver) + if password != "" { + if username != "" { + args = append(args, "auth", username, password) + } else { + args = append(args, "auth", "default", password) + } + } + if clientName != "" { + args = append(args, "setname", clientName) + } + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + //------------------------------------------------------------------------------ func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { @@ -468,6 +598,50 @@ func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { return cmd } +// FilterBy is used for the `CommandList` command parameter. 
+type FilterBy struct { + Module string + ACLCat string + Pattern string +} + +func (c cmdable) CommandList(ctx context.Context, filter *FilterBy) *StringSliceCmd { + args := make([]interface{}, 0, 5) + args = append(args, "command", "list") + if filter != nil { + if filter.Module != "" { + args = append(args, "filterby", "module", filter.Module) + } else if filter.ACLCat != "" { + args = append(args, "filterby", "aclcat", filter.ACLCat) + } else if filter.Pattern != "" { + args = append(args, "filterby", "pattern", filter.Pattern) + } + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) CommandGetKeys(ctx context.Context, commands ...interface{}) *StringSliceCmd { + args := make([]interface{}, 2+len(commands)) + args[0] = "command" + args[1] = "getkeys" + copy(args[2:], commands) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) CommandGetKeysAndFlags(ctx context.Context, commands ...interface{}) *KeyFlagsCmd { + args := make([]interface{}, 2+len(commands)) + args[0] = "command" + args[1] = "getkeysandflags" + copy(args[2:], commands) + cmd := NewKeyFlagsCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + // ClientGetName returns the name of the connection. func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { cmd := NewStringCmd(ctx, "client", "getname") @@ -572,6 +746,12 @@ func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCm return cmd } +func (c cmdable) ExpireTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "expiretime", key) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { cmd := NewStringSliceCmd(ctx, "keys", pattern) _ = c(ctx, cmd) @@ -640,6 +820,12 @@ func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolC return cmd } +func (c cmdable) PExpireTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pexpiretime", key) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) _ = c(ctx, cmd) @@ -697,8 +883,9 @@ type Sort struct { Alpha bool } -func (sort *Sort) args(key string) []interface{} { - args := []interface{}{"sort", key} +func (sort *Sort) args(command, key string) []interface{} { + args := []interface{}{command, key} + if sort.By != "" { args = append(args, "by", sort.By) } @@ -717,14 +904,20 @@ func (sort *Sort) args(key string) []interface{} { return args } +func (c cmdable) SortRO(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args("sort_ro", key)...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { - cmd := NewStringSliceCmd(ctx, sort.args(key)...) + cmd := NewStringSliceCmd(ctx, sort.args("sort", key)...) _ = c(ctx, cmd) return cmd } func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { - args := sort.args(key) + args := sort.args("sort", key) if store != "" { args = append(args, "store", store) } @@ -734,7 +927,7 @@ func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) * } func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { - cmd := NewSliceCmd(ctx, sort.args(key)...) + cmd := NewSliceCmd(ctx, sort.args("sort", key)...) 
_ = c(ctx, cmd) return cmd } @@ -859,6 +1052,7 @@ func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { // - MSet("key1", "value1", "key2", "value2") // - MSet([]string{"key1", "value1", "key2", "value2"}) // - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - MSet(struct), For struct types, see HSet description. func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { args := make([]interface{}, 1, 1+len(values)) args[0] = "mset" @@ -872,6 +1066,7 @@ func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { // - MSetNX("key1", "value1", "key2", "value2") // - MSetNX([]string{"key1", "value1", "key2", "value2"}) // - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +// - MSetNX(struct), For struct types, see HSet description. func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { args := make([]interface{}, 1, 1+len(values)) args[0] = "msetnx" @@ -882,7 +1077,7 @@ func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { } // Set Redis `SET key value [expiration]` command. -// Use expiration for `SETEX`-like behavior. +// Use expiration for `SETEx`-like behavior. // // Zero expiration means the key has no expiration time. // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, @@ -958,8 +1153,8 @@ func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a S return cmd } -// SetEX Redis `SETEX key expiration value` command. -func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { +// SetEx Redis `SETEx key expiration value` command. +func (c cmdable) SetEx(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) _ = c(ctx, cmd) return cmd @@ -1103,6 +1298,8 @@ func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntC return c.bitOp(ctx, "not", destKey, key) } +// BitPos is an API before Redis version 7.0, cmd: bitpos key bit start end +// if you need the `byte | bit` parameter, please use `BitPosSpan`. func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { args := make([]interface{}, 3+len(pos)) args[0] = "bitpos" @@ -1123,6 +1320,18 @@ func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64 return cmd } +// BitPosSpan supports the `byte | bit` parameters in redis version 7.0, +// the bitpos command defaults to using byte type for the `start-end` range, +// which means it counts in bytes from start to end. you can set the value +// of "span" to determine the type of `start-end`. 
+// span = "bit", cmd: bitpos key bit start end bit +// span = "byte", cmd: bitpos key bit start end byte +func (c cmdable) BitPosSpan(ctx context.Context, key string, bit int8, start, end int64, span string) *IntCmd { + cmd := NewIntCmd(ctx, "bitpos", key, bit, start, end, span) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { a := make([]interface{}, 0, 2+len(args)) a = append(a, "bitfield") @@ -1229,8 +1438,8 @@ func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { return cmd } -func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd { - cmd := NewStringStringMapCmd(ctx, "hgetall", key) +func (c cmdable) HGetAll(ctx context.Context, key string) *MapStringStringCmd { + cmd := NewMapStringStringCmd(ctx, "hgetall", key) _ = c(ctx, cmd) return cmd } @@ -1274,11 +1483,29 @@ func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *Slice } // HSet accepts values in following formats: +// // - HSet("myhash", "key1", "value1", "key2", "value2") +// // - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// // - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) // -// Note that it requires Redis v4 for multiple field/value pairs support. +// Playing struct With "redis" tag. +// type MyHash struct { Key1 string `redis:"key1"`; Key2 int `redis:"key2"` } +// +// - HSet("myhash", MyHash{"value1", "value2"}) Warn: redis-server >= 4.0 +// +// For struct, can be a structure pointer type, we only parse the field whose tag is redis. +// if you don't want the field to be read, you can use the `redis:"-"` flag to ignore it, +// or you don't need to set the redis tag. +// For the type of structure field, we only support simple data types: +// string, int/uint(8,16,32,64), float(32,64), time.Time(to RFC3339Nano), time.Duration(to Nanoseconds ), +// if you are other more complex or custom data types, please implement the encoding.BinaryMarshaler interface. +// +// Note that in older versions of Redis server(redis-server < 4.0), HSet only supports a single key-value pair. +// redis-docs: https://redis.io/commands/hset (Starting with Redis version 4.0.0: Accepts multiple field and value arguments.) +// If you are using a Struct type and the number of fields is greater than one, +// you will receive an error similar to "ERR wrong number of arguments", you can use HMSet as a substitute. func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(values)) args[0] = "hset" @@ -1313,16 +1540,15 @@ func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { } // HRandField redis-server version >= 6.2.0. -func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd { - args := make([]interface{}, 0, 4) - - // Although count=0 is meaningless, redis accepts count=0. - args = append(args, "hrandfield", key, count) - if withValues { - args = append(args, "withvalues") - } +func (c cmdable) HRandField(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hrandfield", key, count) + _ = c(ctx, cmd) + return cmd +} - cmd := NewStringSliceCmd(ctx, args...) +// HRandFieldWithValues redis-server version >= 6.2.0. 
+func (c cmdable) HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd { + cmd := NewKeyValueSliceCmd(ctx, "hrandfield", key, count, "withvalues") _ = c(ctx, cmd) return cmd } @@ -1342,6 +1568,21 @@ func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...strin return cmd } +func (c cmdable) BLMPop(ctx context.Context, timeout time.Duration, direction string, count int64, keys ...string) *KeyValuesCmd { + args := make([]interface{}, 3+len(keys), 6+len(keys)) + args[0] = "blmpop" + args[1] = formatSec(ctx, timeout) + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + args = append(args, strings.ToLower(direction), "count", count) + cmd := NewKeyValuesCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { args := make([]interface{}, 1+len(keys)+1) args[0] = "brpop" @@ -1368,12 +1609,34 @@ func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, tim return cmd } +func (c cmdable) LCS(ctx context.Context, q *LCSQuery) *LCSCmd { + cmd := NewLCSCmd(ctx, q) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { cmd := NewStringCmd(ctx, "lindex", key, index) _ = c(ctx, cmd) return cmd } +// LMPop Pops one or more elements from the first non-empty list key from the list of provided key names. +// direction: left or right, count: > 0 +// example: client.LMPop(ctx, "left", 3, "key1", "key2") +func (c cmdable) LMPop(ctx context.Context, direction string, count int64, keys ...string) *KeyValuesCmd { + args := make([]interface{}, 2+len(keys), 5+len(keys)) + args[0] = "lmpop" + args[1] = len(keys) + for i, key := range keys { + args[2+i] = key + } + args = append(args, strings.ToLower(direction), "count", count) + cmd := NewKeyValuesCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) _ = c(ctx, cmd) @@ -1602,6 +1865,22 @@ func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { return cmd } +func (c cmdable) SInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { + args := make([]interface{}, 4+len(keys)) + args[0] = "sintercard" + numkeys := int64(0) + for i, key := range keys { + args[2+i] = key + numkeys++ + } + args[1] = numkeys + args[2+numkeys] = "limit" + args[3+numkeys] = limit + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { args := make([]interface{}, 2+len(keys)) args[0] = "sinterstore" @@ -1725,11 +2004,7 @@ type XAddArgs struct { Stream string NoMkStream bool MaxLen int64 // MAXLEN N - - // Deprecated: use MaxLen+Approx, remove in v9. - MaxLenApprox int64 // MAXLEN ~ N - - MinID string + MinID string // Approx causes MaxLen and MinID to use "~" matcher (instead of "="). Approx bool Limit int64 @@ -1737,8 +2012,6 @@ type XAddArgs struct { Values interface{} } -// XAdd a.Limit has a bug, please confirm it and use it. 
-// issue: https://github.com/redis/redis/issues/9046 func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { args := make([]interface{}, 0, 11) args = append(args, "xadd", a.Stream) @@ -1752,9 +2025,6 @@ func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd { } else { args = append(args, "maxlen", a.MaxLen) } - case a.MaxLenApprox > 0: - // TODO remove in v9. - args = append(args, "maxlen", "~", a.MaxLenApprox) case a.MinID != "": if a.Approx { args = append(args, "minid", "~", a.MinID) @@ -2049,8 +2319,10 @@ func xClaimArgs(a *XClaimArgs) []interface{} { // xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). // example: -// XTRIM key MAXLEN/MINID threshold LIMIT limit. -// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// +// XTRIM key MAXLEN/MINID threshold LIMIT limit. +// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// // The redis-server version is lower than 6.2, please set limit to 0. func (c cmdable) xTrim( ctx context.Context, key, strategy string, @@ -2070,38 +2342,20 @@ func (c cmdable) xTrim( return cmd } -// Deprecated: use XTrimMaxLen, remove in v9. -func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) -} - -// Deprecated: use XTrimMaxLenApprox, remove in v9. -func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd { - return c.xTrim(ctx, key, "maxlen", true, maxLen, 0) -} - // XTrimMaxLen No `~` rules are used, `limit` cannot be used. // cmd: XTRIM key MAXLEN maxLen func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd { return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) } -// XTrimMaxLenApprox LIMIT has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd { return c.xTrim(ctx, key, "maxlen", true, maxLen, limit) } -// XTrimMinID No `~` rules are used, `limit` cannot be used. -// cmd: XTRIM key MINID minID func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd { return c.xTrim(ctx, key, "minid", false, minID, 0) } -// XTrimMinIDApprox LIMIT has a bug, please confirm it and use it. -// issue: https://github.com/redis/redis/issues/9046 -// cmd: XTRIM key MINID ~ minID LIMIT limit func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd { return c.xTrim(ctx, key, "minid", true, minID, limit) } @@ -2214,6 +2468,26 @@ func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...st return cmd } +// BZMPop is the blocking variant of ZMPOP. +// When any of the sorted sets contains elements, this command behaves exactly like ZMPOP. +// When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the timeout elapses. +// A timeout of zero can be used to block indefinitely. +// example: client.BZMPop(ctx, 0,"max", 1, "set") +func (c cmdable) BZMPop(ctx context.Context, timeout time.Duration, order string, count int64, keys ...string) *ZSliceWithKeyCmd { + args := make([]interface{}, 3+len(keys), 6+len(keys)) + args[0] = "bzmpop" + args[1] = formatSec(ctx, timeout) + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + args = append(args, strings.ToLower(order), "count", count) + cmd := NewZSliceWithKeyCmd(ctx, args...) 
+ cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + // ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. type ZAddArgs struct { NX bool @@ -2266,116 +2540,42 @@ func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *F return cmd } -// TODO: Compatible with v8 api, will be removed in v9. -func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd { - args.Members = make([]Z, len(members)) - for i, m := range members { - args.Members[i] = *m - } - cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) - _ = c(ctx, cmd) - return cmd -} - // ZAdd Redis `ZADD key score member [score member ...]` command. -func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{}, members...) +func (c cmdable) ZAdd(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + Members: members, + }) } -// ZAddNX Redis `ZADD key NX score member [score member ...]` command. -func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - NX: true, - }, members...) +// ZAddLT Redis `ZADD key LT score member [score member ...]` command. +func (c cmdable) ZAddLT(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + LT: true, + Members: members, + }) } -// ZAddXX Redis `ZADD key XX score member [score member ...]` command. -func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - XX: true, - }, members...) -} - -// ZAddCh Redis `ZADD key CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - Ch: true, - }, members...) -} - -// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// NX: true, -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - NX: true, - Ch: true, - }, members...) -} - -// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. -// Deprecated: Use -// client.ZAddArgs(ctx, ZAddArgs{ -// XX: true, -// Ch: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd { - return c.zAdd(ctx, key, ZAddArgs{ - XX: true, - Ch: true, - }, members...) -} - -// ZIncr Redis `ZADD key INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ - Members: []Z{*member}, +// ZAddGT Redis `ZADD key GT score member [score member ...]` command. +func (c cmdable) ZAddGT(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ + GT: true, + Members: members, }) } -// ZIncrNX Redis `ZADD key NX INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// NX: true, -// Members: []Z, -// }) -// remove in v9. 
-func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ +// ZAddNX Redis `ZADD key NX score member [score member ...]` command. +func (c cmdable) ZAddNX(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ NX: true, - Members: []Z{*member}, + Members: members, }) } -// ZIncrXX Redis `ZADD key XX INCR score member` command. -// Deprecated: Use -// client.ZAddArgsIncr(ctx, ZAddArgs{ -// XX: true, -// Members: []Z, -// }) -// remove in v9. -func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd { - return c.ZAddArgsIncr(ctx, key, ZAddArgs{ +// ZAddXX Redis `ZADD key XX score member [score member ...]` command. +func (c cmdable) ZAddXX(ctx context.Context, key string, members ...Z) *IntCmd { + return c.ZAddArgs(ctx, key, ZAddArgs{ XX: true, - Members: []Z{*member}, + Members: members, }) } @@ -2434,6 +2634,38 @@ func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd return cmd } +func (c cmdable) ZInterCard(ctx context.Context, limit int64, keys ...string) *IntCmd { + args := make([]interface{}, 4+len(keys)) + args[0] = "zintercard" + numkeys := int64(0) + for i, key := range keys { + args[2+i] = key + numkeys++ + } + args[1] = numkeys + args[2+numkeys] = "limit" + args[3+numkeys] = limit + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ZMPop Pops one or more elements with the highest or lowest score from the first non-empty sorted set key from the list of provided key names. +// direction: "max" (highest score) or "min" (lowest score), count: > 0 +// example: client.ZMPop(ctx, "max", 5, "set1", "set2") +func (c cmdable) ZMPop(ctx context.Context, order string, count int64, keys ...string) *ZSliceWithKeyCmd { + args := make([]interface{}, 2+len(keys), 5+len(keys)) + args[0] = "zmpop" + args[1] = len(keys) + for i, key := range keys { + args[2+i] = key + } + args = append(args, strings.ToLower(order), "count", count) + cmd := NewZSliceWithKeyCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { args := make([]interface{}, 2+len(members)) args[0] = "zmscore" @@ -2488,11 +2720,13 @@ func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSlic // ZRangeArgs is all the options of the ZRange command. // In version> 6.2.0, you can replace the(cmd): -// ZREVRANGE, -// ZRANGEBYSCORE, -// ZREVRANGEBYSCORE, -// ZRANGEBYLEX, -// ZREVRANGEBYLEX. +// +// ZREVRANGE, +// ZRANGEBYSCORE, +// ZREVRANGEBYSCORE, +// ZRANGEBYLEX, +// ZREVRANGEBYLEX. +// // Please pay attention to your redis-server version. // // Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. @@ -2655,6 +2889,14 @@ func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { return cmd } +// ZRankWithScore according to the Redis documentation, if member does not exist +// in the sorted set or key does not exist, it will return a redis.Nil error. 
+func (c cmdable) ZRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd { + cmd := NewRankWithScoreCmd(ctx, "zrank", key, member, "withscore") + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { args := make([]interface{}, 2, 2+len(members)) args[0] = "zrem" @@ -2695,6 +2937,8 @@ func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) * return cmd } +// ZRevRangeWithScores according to the Redis documentation, if member does not exist +// in the sorted set or key does not exist, it will return a redis.Nil error. func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") _ = c(ctx, cmd) @@ -2745,6 +2989,12 @@ func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { return cmd } +func (c cmdable) ZRevRankWithScore(ctx context.Context, key, member string) *RankWithScoreCmd { + cmd := NewRankWithScoreCmd(ctx, "zrevrank", key, member, "withscore") + _ = c(ctx, cmd) + return cmd +} + func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { cmd := NewFloatCmd(ctx, "zscore", key, member) _ = c(ctx, cmd) @@ -2783,16 +3033,15 @@ func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *I } // ZRandMember redis-server version >= 6.2.0. -func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd { - args := make([]interface{}, 0, 4) - - // Although count=0 is meaningless, redis accepts count=0. - args = append(args, "zrandmember", key, count) - if withScores { - args = append(args, "withscores") - } +func (c cmdable) ZRandMember(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "zrandmember", key, count) + _ = c(ctx, cmd) + return cmd +} - cmd := NewStringSliceCmd(ctx, args...) +// ZRandMemberWithScores redis-server version >= 6.2.0. +func (c cmdable) ZRandMemberWithScores(ctx context.Context, key string, count int) *ZSliceCmd { + cmd := NewZSliceCmd(ctx, "zrandmember", key, count, "withscores") _ = c(ctx, cmd) return cmd } @@ -2897,7 +3146,7 @@ func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { // ClientKillByFilter is new style syntax, while the ClientKill is old // -// CLIENT KILL