Documentation ¶
Index ¶
- Constants
- Variables
- func BodyAttachments(body Body) map[string]interface{}
- func ClockMatches(a, b []uint64) bool
- func ConnectToBucket(spec base.BucketSpec, callback sgbucket.BucketNotifyFn) (bucket base.Bucket, err error)
- func GenerateBitFlagIndex(sequence uint64) uint16
- func GenerateBlockKey(channelName string, sequence uint64, partition uint16) string
- func GenerateBlockKeys(channelName string, minSeq uint64, maxSeq uint64, partition uint16) []string
- func GenerateProofOfAttachment(attachmentData []byte) (nonce []byte, proof string)
- func GetChannelClockKey(channelName string) string
- func GetIndexBlockKey(channelName string, blockIndex uint16, partition uint16) string
- func GetStringArrayProperty(body Body, property string) ([]string, error)
- func IsNotFoundError(err error) bool
- func NewSequenceHasher(options *SequenceHashOptions) (*sequenceHasher, error)
- func ParseIntSequenceComponent(component string, allowEmpty bool) (uint64, error)
- func ParseRevID(revid string) (int, string)
- func ParseRevisions(body Body) []string
- func ReadJSONFromMIME(headers http.Header, input io.Reader, into interface{}) error
- func SearchSequenceQueue(a SkippedSequenceQueue, x uint64) int
- func SearchSortedEntrySet(a SortedEntrySet, x uint64) int
- func UnmarshalDocumentSyncData(data []byte, needHistory bool) (*syncData, error)
- func UnmarshalDocumentSyncDataFromFeed(data []byte, dataType uint8, needHistory bool) (result *syncData, rawBody []byte, err error)
- func VacuumAttachments(bucket base.Bucket) (int, error)
- func ValidateDatabaseName(dbName string) error
- func VectorChangesBefore(c1, c2 *ChangeEntry) bool
- func VectorChangesEquals(c1, c2 *ChangeEntry) bool
- type AllChannelStats
- type AsyncEvent
- type AsyncEventHandler
- type AttachmentCallback
- type AttachmentData
- type AttachmentKey
- type BitFlagBlock
- func (b *BitFlagBlock) AddEntry(entry *LogEntry) error
- func (b *BitFlagBlock) AddEntrySet(entries []*LogEntry) error
- func (b *BitFlagBlock) Cas() uint64
- func (b *BitFlagBlock) GetAllEntries() []*LogEntry
- func (b *BitFlagBlock) GetEntries(vbNo uint16, fromSeq uint64, toSeq uint64, includeKeys bool) (entries []*LogEntry, keySet []string)
- func (b *BitFlagBlock) Key() string
- func (b *BitFlagBlock) Marshal() ([]byte, error)
- func (b *BitFlagBlock) SetCas(cas uint64)
- func (b *BitFlagBlock) Unmarshal(value []byte) error
- type BitFlagBlockData
- type BitFlagBufferBlock
- func (b *BitFlagBufferBlock) AddEntry(entry *LogEntry) error
- func (b *BitFlagBufferBlock) Cas() uint64
- func (b *BitFlagBufferBlock) GetAllEntries() []*LogEntry
- func (b *BitFlagBufferBlock) GetEntries(vbNo uint16, fromSeq uint64, toSeq uint64, includeKeys bool) (entries []*LogEntry, keySet []string)
- func (b *BitFlagBufferBlock) Key() string
- func (b *BitFlagBufferBlock) Marshal() ([]byte, error)
- func (b *BitFlagBufferBlock) SetCas(cas uint64)
- func (b *BitFlagBufferBlock) Unmarshal(value []byte) error
- type BitFlagStorage
- func (b *BitFlagStorage) AddEntrySet(entries []*LogEntry) (partitionUpdates []*base.PartitionClock, err error)
- func (b *BitFlagStorage) GetChanges(fromSeq base.SequenceClock, toSeq base.SequenceClock, limit int) ([]*LogEntry, error)
- func (b *BitFlagStorage) ReadLogEntry(vbNo uint16, sequence uint64) (*LogEntry, error)
- func (b *BitFlagStorage) RollbackTo(rollbackVbNo uint16, rollbackSeq uint64) error
- func (b *BitFlagStorage) StoresLogEntries() bool
- func (b *BitFlagStorage) UpdateCache(sinceClock base.SequenceClock, toClock base.SequenceClock, ...) error
- func (b *BitFlagStorage) WriteLogEntry(entry *LogEntry) error
- type BlockSet
- type Body
- type CacheOptions
- type ChangeEntry
- type ChangeIndex
- type ChangeIndexOptions
- type ChangeRev
- type ChangesOptions
- type ChannelCacheOptions
- type ChannelFeedType
- type ChannelIndex
- type ChannelIndexStats
- type ChannelPollingStats
- type ChannelStats
- type ChannelStorage
- type ChannelStorageReader
- type ChannelStorageWriter
- type DBOnlineCallback
- type DBStateChangeEvent
- type Database
- func (db *Database) AddDocInstanceToChangeEntry(entry *ChangeEntry, doc *document, options ChangesOptions)
- func (db *Database) AddDocToChangeEntry(entry *ChangeEntry, options ChangesOptions)
- func (db *Database) AuthorizeDocID(docid, revid string) error
- func (db *Database) Compact() (int, error)
- func (db *Database) DeleteAllDocs(docType string) error
- func (db *Database) DeleteDesignDoc(ddocName string) (err error)
- func (db *Database) DeleteDoc(docid string, revid string) (string, error)
- func (db *Database) DeleteSpecial(doctype string, docid string, revid string) error
- func (db *Database) DocCount() int
- func (db *Database) ForEachDocID(callback ForEachDocIDFunc, resultsOpts ForEachDocIDOptions) error
- func (db *Database) ForEachStubAttachment(body Body, minRevpos int, callback AttachmentCallback) error
- func (db *Database) Get(docid string) (Body, error)
- func (db *Database) GetAttachment(key AttachmentKey) ([]byte, error)
- func (db *Database) GetChangeLog(channelName string, afterSeq uint64) []*LogEntry
- func (db *Database) GetChanges(channels base.Set, options ChangesOptions) ([]*ChangeEntry, error)
- func (db *Database) GetDesignDoc(ddocName string, result interface{}) (err error)
- func (db *Database) GetDocAndActiveRev(docid string) (populatedDoc *document, body Body, err error)
- func (db *Database) GetRev(docid, revid string, history bool, attachmentsSince []string) (Body, error)
- func (db *Database) GetRevAndChannels(docid string, revid string, listRevisions bool) (body Body, channels channels.ChannelMap, access UserAccessMap, ...)
- func (db *Database) GetRevWithHistory(docid, revid string, maxHistory int, historyFrom []string, ...) (Body, error)
- func (db *Database) GetSpecial(doctype string, docid string) (Body, error)
- func (db *Database) ImportDoc(docid string, body Body, isDelete bool) (docOut *document, err error)
- func (db *Database) ImportRawDoc(docid string, value []byte, isDelete bool) (docOut *document, err error)
- func (db *Database) MultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)
- func (db *Database) Post(body Body) (string, string, error)
- func (db *Database) Purge(key string) error
- func (db *Database) Put(docid string, body Body) (newRevID string, err error)
- func (db *Database) PutDesignDoc(ddocName string, ddoc sgbucket.DesignDoc) (err error)
- func (db *Database) PutExistingRev(docid string, body Body, docHistory []string) error
- func (db *Database) PutSpecial(doctype string, docid string, body Body) (string, error)
- func (db *Database) QueryDesignDoc(ddocName string, viewName string, options map[string]interface{}) (*sgbucket.ViewResult, error)
- func (db *Database) ReloadUser() error
- func (db *Database) RevDiff(docid string, revids []string) (missing, possible []string)
- func (db *Database) SameAs(otherdb *Database) bool
- func (db *Database) SimpleMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)
- func (db *Database) UpdateAllDocChannels(doCurrentDocs bool, doImportDocs bool) (int, error)
- func (db *Database) User() auth.User
- func (db *Database) VectorMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)
- func (db *Database) WriteMultipartDocument(body Body, writer *multipart.Writer, compress bool)
- func (db *Database) WriteRevisionAsPart(revBody Body, isError bool, compressPart bool, writer *multipart.Writer) error
- type DatabaseContext
- func (db *DatabaseContext) AllPrincipalIDs() (users, roles []string, err error)
- func (context *DatabaseContext) Authenticator() *auth.Authenticator
- func (context *DatabaseContext) Close()
- func (context *DatabaseContext) ComputeChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)
- func (context *DatabaseContext) ComputeRolesForUser(user auth.User) (channels.TimedSet, error)
- func (context *DatabaseContext) ComputeSequenceChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)
- func (context *DatabaseContext) ComputeSequenceRolesForUser(user auth.User) (channels.TimedSet, error)
- func (context *DatabaseContext) ComputeVbSequenceChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)
- func (context *DatabaseContext) ComputeVbSequenceRolesForUser(user auth.User) (channels.TimedSet, error)
- func (db *DatabaseContext) DeleteUserSessions(userName string) error
- func (context *DatabaseContext) GetChangeIndex() ChangeIndex
- func (db *DatabaseContext) GetDoc(docid string) (doc *document, err error)
- func (db *DatabaseContext) GetDocSyncData(docid string) (syncData, error)
- func (context *DatabaseContext) GetIndexBucket() base.Bucket
- func (context *DatabaseContext) GetOIDCProvider(providerName string) (*auth.OIDCProvider, error)
- func (dbc *DatabaseContext) GetPrincipal(name string, isUser bool) (info *PrincipalConfig, err error)
- func (context *DatabaseContext) GetStableClock() (clock base.SequenceClock, err error)
- func (context *DatabaseContext) GetUserViewsEnabled() bool
- func (db *DatabaseContext) IndexAllChannelStats() ([]*ChannelStats, error)
- func (db *DatabaseContext) IndexChannelStats(channelName string) (*ChannelStats, error)
- func (db *DatabaseContext) IndexStats() (indexStats *IndexStats, err error)
- func (context *DatabaseContext) IsClosed() bool
- func (context *DatabaseContext) LastSequence() (uint64, error)
- func (context *DatabaseContext) NotifyUser(username string)
- func (dbc *DatabaseContext) ParseSequenceID(str string) (s SequenceID, err error)
- func (context *DatabaseContext) ReserveSequences(numToReserve uint64) error
- func (context *DatabaseContext) RestartListener() error
- func (context *DatabaseContext) SetOnChangeCallback(callback DocChangedFunc)
- func (context *DatabaseContext) SetUserViewsEnabled(value bool)
- func (dc *DatabaseContext) TakeDbOffline(reason string) error
- func (context *DatabaseContext) TapListener() changeListener
- func (dbc *DatabaseContext) UpdatePrincipal(newInfo PrincipalConfig, isUser bool, allowReplace bool) (replaced bool, err error)
- func (context *DatabaseContext) UpdateSyncFun(syncFun string) (changed bool, err error)
- func (context *DatabaseContext) UseGlobalSequence() bool
- func (context *DatabaseContext) UseXattrs() bool
- func (context *DatabaseContext) WaitForPendingChanges() (err error)
- func (context *DatabaseContext) WaitForSequence(sequence uint64) (err error)
- func (context *DatabaseContext) WaitForSequenceWithMissing(sequence uint64) (err error)
- type DatabaseContextOptions
- type DenseBlock
- func (d *DenseBlock) AddEntrySet(entries []*LogEntry, bucket base.Bucket) (overflow []*LogEntry, pendingRemoval []*LogEntry, ...)
- func (d *DenseBlock) Count() uint16
- func (d *DenseBlock) GetAllEntries() []*LogEntry
- func (d *DenseBlock) GetEntries(vbNo uint16, fromSeq uint64, toSeq uint64, includeKeys bool) (entries []*LogEntry, keySet []string)
- func (d *DenseBlock) GetEntry(position int64, length uint16) (entry DenseBlockDataEntry)
- func (d *DenseBlock) GetIndexEntry(position int64) (indexEntry DenseBlockIndexEntry)
- func (d *DenseBlock) MakeLogEntry(indexEntry DenseBlockIndexEntry, entry DenseBlockDataEntry) *LogEntry
- func (d *DenseBlock) MarkInactive() error
- func (d *DenseBlock) RemoveEntrySet(entries []*LogEntry, bucket base.Bucket) (pendingRemoval []*LogEntry, err error)
- func (d *DenseBlock) RollbackTo(rollbackVbNo uint16, rollbackSeq uint64, bucket base.Bucket) (rollbackComplete bool, err error)
- type DenseBlockDataEntry
- type DenseBlockEntry
- type DenseBlockIndexEntry
- type DenseBlockIterator
- type DenseBlockList
- func (l *DenseBlockList) ActiveListEntry() *DenseBlockListEntry
- func (l *DenseBlockList) AddBlock() (*DenseBlock, error)
- func (l *DenseBlockList) GetActiveBlock() *DenseBlock
- func (l *DenseBlockList) LoadBlock(listEntry DenseBlockListEntry) *DenseBlock
- func (l *DenseBlockList) LoadPrevious() error
- func (l *DenseBlockList) PreviousBlock(currentBlockIndex int) (*DenseBlockListEntry, error)
- func (l *DenseBlockList) ReloadDenseBlockList() (found bool, err error)
- func (l *DenseBlockList) ValidFrom() base.PartitionClock
- type DenseBlockListEntry
- type DenseBlockListStorage
- type DensePartitionStorageReader
- type DensePartitionStorageReaderNonCaching
- type DenseStorageReader
- type DocChangedFunc
- type DocumentChangeEvent
- type Event
- type EventHandler
- type EventManager
- func (em *EventManager) HasHandlerForEvent(eventType EventType) bool
- func (em *EventManager) ProcessEvent(event Event)
- func (em *EventManager) RaiseDBStateChangeEvent(dbName string, state string, reason string, adminInterface string) error
- func (em *EventManager) RaiseDocumentChangeEvent(body Body, oldBodyJSON string, channels base.Set) error
- func (em *EventManager) RegisterEventHandler(handler EventHandler, eventType EventType)
- func (em *EventManager) Start(maxProcesses uint, waitTime int)
- type EventType
- type ForEachDocIDFunc
- type ForEachDocIDOptions
- type IDAndRev
- type IndexBlock
- type IndexPartitionsFunc
- type IndexStats
- type IndexType
- type JSEventFunction
- type KvChannelIndex
- type LogEntries
- type LogEntry
- type LogPriorityQueue
- type OidcTestProviderOptions
- type PartitionChanges
- type PartitionMapStats
- type PartitionStats
- type PrincipalConfig
- type ResponseType
- type RevInfo
- type RevKey
- type RevTree
- type RevisionCache
- type RevisionCacheLoaderFunc
- type SeqHashCacheLoaderFunc
- type SequenceHashOptions
- type SequenceID
- func (s SequenceID) Before(s2 SequenceID) bool
- func (s SequenceID) Equals(s2 SequenceID) bool
- func (s SequenceID) IsNonZero() bool
- func (s SequenceID) MarshalJSON() ([]byte, error)
- func (s SequenceID) Print() string
- func (s SequenceID) SafeSequence() uint64
- func (s SequenceID) String() string
- func (s *SequenceID) UnmarshalJSON(data []byte) error
- func (s SequenceID) VbucketSequenceAfter(vbNo uint16, seq uint64) bool
- func (s SequenceID) VbucketSequenceBefore(vbNo uint16, seq uint64) bool
- type SequenceType
- type Shadower
- type SkippedSequence
- type SkippedSequenceQueue
- type SortedEntrySet
- type Statistics
- type UnsupportedOptions
- type UserAccessMap
- type UserViewsOptions
- type ViewDoc
- type Webhook
Constants ¶
const ( DefaultCachePendingSeqMaxNum = 10000 // Max number of waiting sequences DefaultCachePendingSeqMaxWait = 5 * time.Second // Max time we'll wait for a pending sequence before sending to missed queue DefaultSkippedSeqMaxWait = 60 * time.Minute // Max time we'll wait for an entry in the missed queue before purging )
const ( WaiterClosed uint32 = iota WaiterHasChanges WaiterCheckTerminated )
const ( BackfillFlag_None backfillFlag = iota BackfillFlag_Pending BackfillFlag_Complete )
const ( DBOffline uint32 = iota DBStarting DBOnline DBStopping DBResyncing )
const ( DefaultRevsLimit = 1000 DefaultUseXattrs = false // Whether Sync Gateway uses xattrs for metadata storage, if not specified in the config KSyncKeyPrefix = "_sync:" // All special/internal documents the gateway creates have this prefix in their keys. KSyncXattrName = "_sync" // Name of XATTR used to store sync metadata )
const ( DesignDocSyncGateway = "sync_gateway" DesignDocSyncHousekeeping = "sync_housekeeping" ViewPrincipals = "principals" ViewChannels = "channels" ViewAccess = "access" ViewAccessVbSeq = "access_vbseq" ViewRoleAccess = "role_access" ViewRoleAccessVbSeq = "role_access_vbseq" ViewAllBits = "all_bits" ViewAllDocs = "all_docs" ViewImport = "import" ViewOldRevs = "old_revs" ViewSessions = "sessions" )
const ( Seq_NotInChannel = iota Seq_InChannel Seq_Removed )
Bit flag values
const ( KeyFormat_DenseBlockList = "%s:blist%d:p%d:%s" // base.KIndexPrefix, list index, partition, channelname KeyFormat_DenseBlockListActive = "%s:blist:p%d:%s" // base.KIndexPrefix, partition, channelname KeyFormat_DenseBlock = "%s:block%d:p%d:%s" // base.KIndexPrefix, block index, partition, channelname )
const ( UnusedSequenceKeyPrefix = "_sync:unusedSeq:" // Prefix for unused sequence documents UnusedSequenceTTL = 10 * 60 // 10 minute expiry for unused sequence docs )
const ( Undefined = SequenceType(iota) IntSequenceType ClockSequenceType )
const DB_HEADER_LEN = 2 // Length of block header. Currently only contains entry count
const DENSE_BLOCK_ENTRY_FIXED_LEN = 3 // Length of fixed length components (flags, keylen)
const INDEX_ENTRY_LEN = 12
const KDefaultRevisionCacheCapacity = 5000
Number of recently-accessed doc revisions to cache in RAM
const NoSeq = uint64(0x7FFFFFFFFFFFFFFF)
Variables ¶
var ( DefaultChannelCacheMinLength = 50 // Keep at least this many entries in cache DefaultChannelCacheMaxLength = 500 // Don't put more than this many entries in cache DefaultChannelCacheAge = 60 * time.Second // Keep entries at least this long )
var ByteCachePollingTime = 1000 // initial polling time for notify, ms
var EnableStarChannelLog = true
Enable keeping a channel-log for the "*" channel (channel.UserStarChannel). The only time this channel is needed is if someone has access to "*" (e.g. admin-party) and tracks its changes feed.
var IndexExpvars *expvar.Map
var MaxBlockSize = 10000 // Maximum size of index block, in bytes
var MaxListBlockCount = 1000 // When the number of blocks in the active list exceeds MaxListBlockCount, it's rotated
Using var instead of const to simplify testing
var MaxSequenceID = SequenceID{ Seq: math.MaxUint64, // contains filtered or unexported fields }
var RunStateString = []string{ DBOffline: "Offline", DBStarting: "Starting", DBOnline: "Online", DBStopping: "Stopping", DBResyncing: "Resyncing", }
Functions ¶
func BodyAttachments ¶
func ClockMatches ¶
func ConnectToBucket ¶
func ConnectToBucket(spec base.BucketSpec, callback sgbucket.BucketNotifyFn) (bucket base.Bucket, err error)
Helper function to open a Couchbase connection and return a specific bucket.
func GenerateBitFlagIndex ¶
func GenerateBlockKey ¶
func GenerateBlockKeys ¶
Returns the set of all block keys required to return sequences from minSeq to maxSeq for the channel, partition
func GetChannelClockKey ¶
Get the key for the channel clock, based on the channel name
func GetIndexBlockKey ¶
Get the key for the cache block, based on the block index
func GetStringArrayProperty ¶
func IsNotFoundError ¶
func NewSequenceHasher ¶
func NewSequenceHasher(options *SequenceHashOptions) (*sequenceHasher, error)
Creates a new sequenceHasher using 2^exp as mod.
func ParseRevID ¶
Splits a revision ID into generation number and hex digest.
func ParseRevisions ¶
Parses a CouchDB _rev or _revisions property into a list of revision IDs
func ReadJSONFromMIME ¶
Parses a JSON MIME body, unmarshaling it into "into".
func SearchSequenceQueue ¶
func SearchSequenceQueue(a SkippedSequenceQueue, x uint64) int
Skipped Sequence version of sort.SearchInts - based on http://golang.org/src/sort/search.go?s=2959:2994#L73
func SearchSortedEntrySet ¶
func SearchSortedEntrySet(a SortedEntrySet, x uint64) int
Skipped Sequence version of sort.SearchInts - based on http://golang.org/src/sort/search.go?s=2959:2994#L73
func UnmarshalDocumentSyncData ¶
Unmarshals just a document's sync metadata from JSON data. (This is somewhat faster, if all you need is the sync data without the doc body.)
func UnmarshalDocumentSyncDataFromFeed ¶
func UnmarshalDocumentSyncDataFromFeed(data []byte, dataType uint8, needHistory bool) (result *syncData, rawBody []byte, err error)
Unmarshals sync metadata for a document arriving via DCP. Includes handling for xattr content being included in data. If not present in either xattr or document body, returns nil but no error. Returns the raw body, in case it's needed for import.
func VacuumAttachments ¶
Deletes all orphaned CouchDB attachments not used by any revisions.
func ValidateDatabaseName ¶
func VectorChangesBefore ¶
func VectorChangesBefore(c1, c2 *ChangeEntry) bool
Determine which of two changes entries should be processed first, considering backfill, triggered by and sequence values
func VectorChangesEquals ¶
func VectorChangesEquals(c1, c2 *ChangeEntry) bool
Determine whether two changes entries are equivalent, considering backfill flags along with sequence values
Types ¶
type AllChannelStats ¶
type AllChannelStats struct {
Channels []ChannelStats `json:"channels"`
}
type AsyncEvent ¶
type AsyncEvent struct { }
Currently the AsyncEvent type only manages the Synchronous() check. Future enhancements around async processing would leverage this type.
func (AsyncEvent) Synchronous ¶
func (ae AsyncEvent) Synchronous() bool
type AsyncEventHandler ¶
type AsyncEventHandler struct{}
type AttachmentCallback ¶
type AttachmentData ¶
type AttachmentData map[AttachmentKey][]byte
type BitFlagBlock ¶
type BitFlagBlock struct {
// contains filtered or unexported fields
}
func (*BitFlagBlock) AddEntry ¶
func (b *BitFlagBlock) AddEntry(entry *LogEntry) error
func (*BitFlagBlock) AddEntrySet ¶
func (b *BitFlagBlock) AddEntrySet(entries []*LogEntry) error
func (*BitFlagBlock) Cas ¶
func (b *BitFlagBlock) Cas() uint64
func (*BitFlagBlock) GetAllEntries ¶
func (b *BitFlagBlock) GetAllEntries() []*LogEntry
func (*BitFlagBlock) GetEntries ¶
func (b *BitFlagBlock) GetEntries(vbNo uint16, fromSeq uint64, toSeq uint64, includeKeys bool) (entries []*LogEntry, keySet []string)
Block entry retrieval - used by GetEntries and GetEntriesAndKeys.
func (*BitFlagBlock) Key ¶
func (b *BitFlagBlock) Key() string
func (*BitFlagBlock) Marshal ¶
func (b *BitFlagBlock) Marshal() ([]byte, error)
func (*BitFlagBlock) SetCas ¶
func (b *BitFlagBlock) SetCas(cas uint64)
func (*BitFlagBlock) Unmarshal ¶
func (b *BitFlagBlock) Unmarshal(value []byte) error
type BitFlagBlockData ¶
type BitFlagBlockData struct { MinSequence uint64 // Starting sequence Entries map[uint16][]byte // Contents of the cache block doc }
func (*BitFlagBlockData) MaxSequence ¶
func (b *BitFlagBlockData) MaxSequence() uint64
type BitFlagBufferBlock ¶
type BitFlagBufferBlock struct {
// contains filtered or unexported fields
}
func (*BitFlagBufferBlock) AddEntry ¶
func (b *BitFlagBufferBlock) AddEntry(entry *LogEntry) error
func (*BitFlagBufferBlock) Cas ¶
func (b *BitFlagBufferBlock) Cas() uint64
func (*BitFlagBufferBlock) GetAllEntries ¶
func (b *BitFlagBufferBlock) GetAllEntries() []*LogEntry
func (*BitFlagBufferBlock) GetEntries ¶
func (b *BitFlagBufferBlock) GetEntries(vbNo uint16, fromSeq uint64, toSeq uint64, includeKeys bool) (entries []*LogEntry, keySet []string)
Block entry retrieval - used by GetEntries and GetEntriesAndKeys.
func (*BitFlagBufferBlock) Key ¶
func (b *BitFlagBufferBlock) Key() string
func (*BitFlagBufferBlock) Marshal ¶
func (b *BitFlagBufferBlock) Marshal() ([]byte, error)
func (*BitFlagBufferBlock) SetCas ¶
func (b *BitFlagBufferBlock) SetCas(cas uint64)
func (*BitFlagBufferBlock) Unmarshal ¶
func (b *BitFlagBufferBlock) Unmarshal(value []byte) error
type BitFlagStorage ¶
type BitFlagStorage struct {
// contains filtered or unexported fields
}
func NewBitFlagStorage ¶
func NewBitFlagStorage(bucket base.Bucket, channelName string, partitions *base.IndexPartitions) *BitFlagStorage
func (*BitFlagStorage) AddEntrySet ¶
func (b *BitFlagStorage) AddEntrySet(entries []*LogEntry) (partitionUpdates []*base.PartitionClock, err error)
Adds a set of entries to the channel index, returning the partition clock updates resulting from the write
func (*BitFlagStorage) GetChanges ¶
func (b *BitFlagStorage) GetChanges(fromSeq base.SequenceClock, toSeq base.SequenceClock, limit int) ([]*LogEntry, error)
func (*BitFlagStorage) ReadLogEntry ¶
func (b *BitFlagStorage) ReadLogEntry(vbNo uint16, sequence uint64) (*LogEntry, error)
Reads a single entry from the index
func (*BitFlagStorage) RollbackTo ¶
func (b *BitFlagStorage) RollbackTo(rollbackVbNo uint16, rollbackSeq uint64) error
func (*BitFlagStorage) StoresLogEntries ¶
func (b *BitFlagStorage) StoresLogEntries() bool
func (*BitFlagStorage) UpdateCache ¶
func (b *BitFlagStorage) UpdateCache(sinceClock base.SequenceClock, toClock base.SequenceClock, changedPartitions []*base.PartitionRange) error
func (*BitFlagStorage) WriteLogEntry ¶
func (b *BitFlagStorage) WriteLogEntry(entry *LogEntry) error
type Body ¶
type Body map[string]interface{}
The body of a CouchDB document/revision as decoded from JSON.
func (Body) FixJSONNumbers ¶
func (body Body) FixJSONNumbers()
Version of FixJSONNumbers (see base/util.go) that operates on a Body
func (Body) ImmutableAttachmentsCopy ¶
func (Body) ShallowCopy ¶
type CacheOptions ¶
type ChangeEntry ¶
type ChangeEntry struct { Seq SequenceID `json:"seq"` ID string `json:"id"` Deleted bool `json:"deleted,omitempty"` Removed base.Set `json:"removed,omitempty"` Doc Body `json:"doc,omitempty"` Changes []ChangeRev `json:"changes"` Err error `json:"err,omitempty"` // Used to notify feed consumer of errors // contains filtered or unexported fields }
A changes entry; Database.GetChanges returns an array of these. Marshals into the standard CouchDB _changes format.
func (*ChangeEntry) SetBranched ¶
func (ce *ChangeEntry) SetBranched(isBranched bool)
func (*ChangeEntry) String ¶
func (ce *ChangeEntry) String() string
type ChangeIndex ¶
type ChangeIndex interface { // Initialize the index Init(context *DatabaseContext, lastSequence SequenceID, onChange func(base.Set), cacheOptions *CacheOptions, indexOptions *ChangeIndexOptions) error // Stop the index Stop() // Clear the index Clear() // Enable/Disable indexing EnableChannelIndexing(enable bool) // Retrieve changes in a channel GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error) // Retrieve in-memory changes in a channel GetCachedChanges(channelName string, options ChangesOptions) (validFrom uint64, entries []*LogEntry) // Called to add a document to the index DocChanged(event sgbucket.TapEvent) // Retrieves stable sequence for index GetStableSequence(docID string) SequenceID // Retrieves stable sequence for index. Stale=false forces a reload of the clock from the bucket, // stable=true returns cached value (if available) GetStableClock(stale bool) (clock base.SequenceClock, err error) // contains filtered or unexported methods }
type ChangeIndexOptions ¶
type ChangeIndexOptions struct { Type IndexType // Index type Spec base.BucketSpec // Indexing bucket spec Bucket base.Bucket // Indexing bucket Writer bool // Cache Writer Options CacheOptions // Caching options NumShards uint16 // The number of CBGT shards to use HashFrequency uint16 // Hash frequency for changes feeds }
func (ChangeIndexOptions) ValidateOrPanic ¶
func (c ChangeIndexOptions) ValidateOrPanic()
type ChangesOptions ¶
type ChangesOptions struct { Since SequenceID // sequence # to start _after_ Limit int // Max number of changes to return, if nonzero Conflicts bool // Show all conflicting revision IDs, not just winning one? IncludeDocs bool // Include doc body of each change? Wait bool // Wait for results, instead of immediately returning empty result? Continuous bool // Run continuously until terminated? Terminator chan bool // Caller can close this channel to terminate the feed HeartbeatMs uint64 // How often to send a heartbeat to the client TimeoutMs uint64 // After this amount of time, close the longpoll connection ActiveOnly bool // If true, only return information on non-deleted, non-removed revisions }
Options for changes-feeds
type ChannelCacheOptions ¶
type ChannelFeedType ¶
type ChannelFeedType int8
const ( ChannelFeedType_Standard ChannelFeedType = iota ChannelFeedType_ActiveBackfill ChannelFeedType_PendingBackfill )
type ChannelIndex ¶
type ChannelIndex interface { Add(entry *LogEntry) error AddSet(entries []*LogEntry) error GetClock() (uint64, error) SetClock() (uint64, error) GetCachedChanges(options ChangesOptions, stableSequence uint64) Compact() }
ChannelIndex defines the API used by the ChangeIndex to interact with the underlying index storage
type ChannelIndexStats ¶
type ChannelPollingStats ¶
type ChannelStats ¶
type ChannelStats struct { Name string `json:"channel_name"` IndexStats ChannelIndexStats `json:"index,omitempty"` PollingStats ChannelPollingStats `json:"poll,omitempty"` }
type ChannelStorage ¶
type ChannelStorage interface { ChannelStorageReader ChannelStorageWriter }
type ChannelStorageReader ¶
type ChannelStorageReader interface { // GetAllEntries returns all entries for the channel in the specified range, for all vbuckets GetChanges(fromSeq base.SequenceClock, channelClock base.SequenceClock, limit int) ([]*LogEntry, error) UpdateCache(fromSeq base.SequenceClock, channelClock base.SequenceClock, changedPartitions []*base.PartitionRange) error }
ChannelStorage implemented as two interfaces, to support swapping to different underlying storage model without significant refactoring.
type ChannelStorageWriter ¶
type ChannelStorageWriter interface { // AddEntrySet adds a set of entries to the channel index AddEntrySet(entries []*LogEntry) (clockUpdates []*base.PartitionClock, err error) RollbackTo(rollbackVbNo uint16, rollbackSeq uint64) error // If channel storage implementation uses separate storage for log entries and channel presence, // WriteLogEntry and ReadLogEntry can be used to read/write. Useful when changeIndex wants to // manage these documents outside the scope of a channel. StoresLogEntries() allows callers to // check whether this is available. StoresLogEntries() bool WriteLogEntry(entry *LogEntry) error }
type DBOnlineCallback ¶
type DBOnlineCallback func(dbContext *DatabaseContext)
Function type for something that calls NewDatabaseContext and wants a callback when the DB is detected to come back online. A rest.ServerContext package cannot be passed since it would introduce a circular dependency
type DBStateChangeEvent ¶
type DBStateChangeEvent struct { AsyncEvent Doc Body }
DBStateChangeEvent is raised when a DB goes online or offline. Event has name of DB that is firing event, the admin interface address for interacting with the db. The new state, the reason for the state change, the local system time of the change
func (*DBStateChangeEvent) EventType ¶
func (dsce *DBStateChangeEvent) EventType() EventType
func (*DBStateChangeEvent) String ¶
func (dsce *DBStateChangeEvent) String() string
type Database ¶
type Database struct { *DatabaseContext // contains filtered or unexported fields }
Represents a simulated CouchDB database. A new instance is created for each HTTP request, so this struct does not have to be thread-safe.
func CreateDatabase ¶
func CreateDatabase(context *DatabaseContext) (*Database, error)
func GetDatabase ¶
func GetDatabase(context *DatabaseContext, user auth.User) (*Database, error)
Makes a Database object given its name and bucket.
func (*Database) AddDocInstanceToChangeEntry ¶
func (db *Database) AddDocInstanceToChangeEntry(entry *ChangeEntry, doc *document, options ChangesOptions)
Adds a document body and/or its conflicts to a ChangeEntry
func (*Database) AddDocToChangeEntry ¶
func (db *Database) AddDocToChangeEntry(entry *ChangeEntry, options ChangesOptions)
func (*Database) AuthorizeDocID ¶
Returns an HTTP 403 error if the User is not allowed to access any of this revision's channels.
func (*Database) DeleteAllDocs ¶
Deletes all documents in the database
func (*Database) DeleteDesignDoc ¶
func (*Database) DeleteDoc ¶
Deletes a document, by adding a new revision whose "_deleted" property is true.
func (*Database) DeleteSpecial ¶
func (*Database) ForEachDocID ¶
func (db *Database) ForEachDocID(callback ForEachDocIDFunc, resultsOpts ForEachDocIDOptions) error
Iterates over all documents in the database, calling the callback function on each
func (*Database) ForEachStubAttachment ¶
func (db *Database) ForEachStubAttachment(body Body, minRevpos int, callback AttachmentCallback) error
Given a document body, invokes the callback once for each attachment that doesn't include its data. The callback is told whether the attachment body is known to the database, according to its digest. If the attachment isn't known, the callback can return data for it, which will be added to the metadata as a "data" property.
func (*Database) GetAttachment ¶
func (db *Database) GetAttachment(key AttachmentKey) ([]byte, error)
Retrieves an attachment given its key.
func (*Database) GetChangeLog ¶
func (*Database) GetChanges ¶
func (db *Database) GetChanges(channels base.Set, options ChangesOptions) ([]*ChangeEntry, error)
Synchronous convenience function that returns all changes as a simple array.
func (*Database) GetDesignDoc ¶
func (*Database) GetDocAndActiveRev ¶
Returns the body of the active revision of a document, as well as the document's current channels and the user/roles it grants channel access to.
func (*Database) GetRev ¶
func (db *Database) GetRev(docid, revid string, history bool, attachmentsSince []string) (Body, error)
Shortcut for GetRevWithHistory
func (*Database) GetRevAndChannels ¶
func (db *Database) GetRevAndChannels(docid string, revid string, listRevisions bool) (body Body, channels channels.ChannelMap, access UserAccessMap, roleAccess UserAccessMap, flags uint8, sequence uint64, err error)
Returns the body of a revision of a document, as well as the document's current channels and the user/roles it grants channel access to.
func (*Database) GetRevWithHistory ¶
func (db *Database) GetRevWithHistory(docid, revid string, maxHistory int, historyFrom []string, attachmentsSince []string, showExp bool) (Body, error)
Returns the body of a revision of a document. Uses the revision cache.
- revid may be "", meaning the current revision.
- maxHistory is >0 if the caller wants a revision history; it's the max length of the history.
- historyFrom is an optional list of revIDs the client already has. If any of these are found in the revision's history, it will be trimmed after that revID.
- attachmentsSince is nil to return no attachment bodies, otherwise a (possibly empty) list of revisions for which the client already has attachments and doesn't need bodies. Any attachment that hasn't changed since one of those revisions will be returned as a stub.
func (*Database) GetSpecial ¶
func (*Database) ImportRawDoc ¶
func (db *Database) ImportRawDoc(docid string, value []byte, isDelete bool) (docOut *document, err error)
Imports a document that was written by someone other than sync gateway.
func (*Database) MultiChangesFeed ¶
func (db *Database) MultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)
func (*Database) Put ¶
Updates or creates a document. The new body's "_rev" property must match the current revision's, if any.
func (*Database) PutDesignDoc ¶
func (*Database) PutExistingRev ¶
Adds an existing revision to a document along with its history (list of rev IDs.) This is equivalent to the "new_edits":false mode of CouchDB.
func (*Database) PutSpecial ¶
func (*Database) QueryDesignDoc ¶
func (*Database) ReloadUser ¶
Reloads the database's User object, in case its persistent properties have been changed.
func (*Database) RevDiff ¶
Given a document ID and a set of revision IDs, looks up which ones are not known. Returns an array of the unknown revisions, and an array of known revisions that might be recent ancestors.
func (*Database) SimpleMultiChangesFeed ¶
func (db *Database) SimpleMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)
Returns the (ordered) union of all of the changes made to multiple channels.
func (*Database) UpdateAllDocChannels ¶
Re-runs the sync function on every current document in the database (if doCurrentDocs==true) and/or imports docs in the bucket not known to the gateway (if doImportDocs==true). To be used when the JavaScript sync function changes.
func (*Database) VectorMultiChangesFeed ¶
func (db *Database) VectorMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)
Returns the (ordered) union of all of the changes made to multiple channels.
func (*Database) WriteMultipartDocument ¶
Writes a revision to a MIME multipart writer, encoding large attachments as separate parts.
func (*Database) WriteRevisionAsPart ¶
func (db *Database) WriteRevisionAsPart(revBody Body, isError bool, compressPart bool, writer *multipart.Writer) error
Adds a new part to the given multipart writer, containing the given revision. The revision will be written as a nested multipart body if it has attachments.
type DatabaseContext ¶
type DatabaseContext struct { Name string // Database name Bucket base.Bucket // Storage BucketSpec base.BucketSpec // The BucketSpec BucketLock sync.RWMutex // Control Access to the underlying bucket object ChannelMapper *channels.ChannelMapper // Runs JS 'sync' function StartTime time.Time // Timestamp when context was instantiated ChangesClientStats Statistics // Tracks stats of # of changes connections RevsLimit uint32 // Max depth a document's revision tree can grow to Shadower *Shadower // Tracks an external Couchbase bucket EventMgr *EventManager // Manages notification events AllowEmptyPassword bool // Allow empty passwords? Defaults to false SequenceHasher *sequenceHasher // Used to generate and resolve hash values for vector clock sequences SequenceType SequenceType // Type of sequences used for this DB (integer or vector clock) Options DatabaseContextOptions // Database Context Options AccessLock sync.RWMutex // Allows DB offline to block until synchronous calls have completed State uint32 // The runtime state of the DB from a service perspective ExitChanges chan struct{} // Active _changes feeds on the DB will close when this channel is closed OIDCProviders auth.OIDCProviderMap // OIDC clients // contains filtered or unexported fields }
Basic description of a database. Shared between all Database objects on the same database. This object is thread-safe so it can be shared between HTTP handlers.
func NewDatabaseContext ¶
func NewDatabaseContext(dbName string, bucket base.Bucket, autoImport bool, options DatabaseContextOptions) (*DatabaseContext, error)
Creates a new DatabaseContext on a bucket. The bucket will be closed when this context closes.
func (*DatabaseContext) AllPrincipalIDs ¶
func (db *DatabaseContext) AllPrincipalIDs() (users, roles []string, err error)
Returns the IDs of all users and roles
func (*DatabaseContext) Authenticator ¶
func (context *DatabaseContext) Authenticator() *auth.Authenticator
func (*DatabaseContext) Close ¶
func (context *DatabaseContext) Close()
func (*DatabaseContext) ComputeChannelsForPrincipal ¶
func (context *DatabaseContext) ComputeChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)
Recomputes the set of channels a User/Role has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.
func (*DatabaseContext) ComputeRolesForUser ¶
Recomputes the set of roles a User has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.
func (*DatabaseContext) ComputeSequenceChannelsForPrincipal ¶
func (context *DatabaseContext) ComputeSequenceChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)
Recomputes the set of channels a User/Role has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.
func (*DatabaseContext) ComputeSequenceRolesForUser ¶
func (context *DatabaseContext) ComputeSequenceRolesForUser(user auth.User) (channels.TimedSet, error)
Recomputes the set of roles a User has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.
func (*DatabaseContext) ComputeVbSequenceChannelsForPrincipal ¶
func (context *DatabaseContext) ComputeVbSequenceChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)
Recomputes the set of channels a User/Role has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.
func (*DatabaseContext) ComputeVbSequenceRolesForUser ¶
func (context *DatabaseContext) ComputeVbSequenceRolesForUser(user auth.User) (channels.TimedSet, error)
Recomputes the set of roles a User has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.
func (*DatabaseContext) DeleteUserSessions ¶
func (db *DatabaseContext) DeleteUserSessions(userName string) error
Deletes all session documents for a user
func (*DatabaseContext) GetChangeIndex ¶
func (context *DatabaseContext) GetChangeIndex() ChangeIndex
Utility function to support cache testing from outside db package
func (*DatabaseContext) GetDoc ¶
func (db *DatabaseContext) GetDoc(docid string) (doc *document, err error)
Lowest-level method that reads a document from the bucket.
func (*DatabaseContext) GetDocSyncData ¶
func (db *DatabaseContext) GetDocSyncData(docid string) (syncData, error)
This gets *just* the Sync Metadata (_sync field) rather than the entire doc, for efficiency reasons TODO: we'll need to include some 'if db.UseXattrs' handling, similar to what we're doing in GetDoc
func (*DatabaseContext) GetIndexBucket ¶
func (context *DatabaseContext) GetIndexBucket() base.Bucket
Helper method for API unit test retrieval of index bucket
func (*DatabaseContext) GetOIDCProvider ¶
func (context *DatabaseContext) GetOIDCProvider(providerName string) (*auth.OIDCProvider, error)
func (*DatabaseContext) GetPrincipal ¶
func (dbc *DatabaseContext) GetPrincipal(name string, isUser bool) (info *PrincipalConfig, err error)
func (*DatabaseContext) GetStableClock ¶
func (context *DatabaseContext) GetStableClock() (clock base.SequenceClock, err error)
func (*DatabaseContext) GetUserViewsEnabled ¶
func (context *DatabaseContext) GetUserViewsEnabled() bool
func (*DatabaseContext) IndexAllChannelStats ¶
func (db *DatabaseContext) IndexAllChannelStats() ([]*ChannelStats, error)
func (*DatabaseContext) IndexChannelStats ¶
func (db *DatabaseContext) IndexChannelStats(channelName string) (*ChannelStats, error)
func (*DatabaseContext) IndexStats ¶
func (db *DatabaseContext) IndexStats() (indexStats *IndexStats, err error)
func (*DatabaseContext) IsClosed ¶
func (context *DatabaseContext) IsClosed() bool
func (*DatabaseContext) LastSequence ¶
func (context *DatabaseContext) LastSequence() (uint64, error)
func (*DatabaseContext) NotifyUser ¶
func (context *DatabaseContext) NotifyUser(username string)
func (*DatabaseContext) ParseSequenceID ¶
func (dbc *DatabaseContext) ParseSequenceID(str string) (s SequenceID, err error)
func (*DatabaseContext) ReserveSequences ¶
func (context *DatabaseContext) ReserveSequences(numToReserve uint64) error
func (*DatabaseContext) RestartListener ¶
func (context *DatabaseContext) RestartListener() error
For testing only!
func (*DatabaseContext) SetOnChangeCallback ¶
func (context *DatabaseContext) SetOnChangeCallback(callback DocChangedFunc)
func (*DatabaseContext) SetUserViewsEnabled ¶
func (context *DatabaseContext) SetUserViewsEnabled(value bool)
func (*DatabaseContext) TakeDbOffline ¶
func (dc *DatabaseContext) TakeDbOffline(reason string) error
func (*DatabaseContext) TapListener ¶
func (context *DatabaseContext) TapListener() changeListener
func (*DatabaseContext) UpdatePrincipal ¶
func (dbc *DatabaseContext) UpdatePrincipal(newInfo PrincipalConfig, isUser bool, allowReplace bool) (replaced bool, err error)
Updates or creates a principal from a PrincipalConfig structure.
func (*DatabaseContext) UpdateSyncFun ¶
func (context *DatabaseContext) UpdateSyncFun(syncFun string) (changed bool, err error)
Sets the database context's sync function based on the JS code from config. Returns a boolean indicating whether the function is different from the saved one. If multiple gateway instances try to update the function at the same time (to the same new value) only one of them will get a changed=true result.
func (*DatabaseContext) UseGlobalSequence ¶
func (context *DatabaseContext) UseGlobalSequence() bool
func (*DatabaseContext) UseXattrs ¶
func (context *DatabaseContext) UseXattrs() bool
func (*DatabaseContext) WaitForPendingChanges ¶
func (context *DatabaseContext) WaitForPendingChanges() (err error)
Wait until the change-cache has caught up with the latest writes to the database.
func (*DatabaseContext) WaitForSequence ¶
func (context *DatabaseContext) WaitForSequence(sequence uint64) (err error)
Wait until the change-cache has caught up with the latest writes to the database.
func (*DatabaseContext) WaitForSequenceWithMissing ¶
func (context *DatabaseContext) WaitForSequenceWithMissing(sequence uint64) (err error)
Wait until the change-cache has caught up with the latest writes to the database.
type DatabaseContextOptions ¶
type DatabaseContextOptions struct { CacheOptions *CacheOptions IndexOptions *ChangeIndexOptions SequenceHashOptions *SequenceHashOptions RevisionCacheCapacity uint32 AdminInterface *string UnsupportedOptions UnsupportedOptions TrackDocs bool // Whether doc tracking channel should be created (used for autoImport, shadowing) OIDCOptions *auth.OIDCOptions DBOnlineCallback DBOnlineCallback // Callback function to take the DB back online }
type DenseBlock ¶
type DenseBlock struct { Key string // Key of block document in the index bucket // contains filtered or unexported fields }
func NewDenseBlock ¶
func NewDenseBlock(key string, startClock base.PartitionClock) *DenseBlock
func (*DenseBlock) AddEntrySet ¶
func (d *DenseBlock) AddEntrySet(entries []*LogEntry, bucket base.Bucket) (overflow []*LogEntry, pendingRemoval []*LogEntry, updateClock base.PartitionClock, casFailure bool, err error)
Adds entries to block and writes block to the bucket
func (*DenseBlock) Count ¶
func (d *DenseBlock) Count() uint16
func (*DenseBlock) GetAllEntries ¶
func (d *DenseBlock) GetAllEntries() []*LogEntry
func (*DenseBlock) GetEntries ¶
func (*DenseBlock) GetEntry ¶
func (d *DenseBlock) GetEntry(position int64, length uint16) (entry DenseBlockDataEntry)
func (*DenseBlock) GetIndexEntry ¶
func (d *DenseBlock) GetIndexEntry(position int64) (indexEntry DenseBlockIndexEntry)
func (*DenseBlock) MakeLogEntry ¶
func (d *DenseBlock) MakeLogEntry(indexEntry DenseBlockIndexEntry, entry DenseBlockDataEntry) *LogEntry
func (*DenseBlock) MarkInactive ¶
func (d *DenseBlock) MarkInactive() error
MarkInactive - apply any changes required when block stops being the active block
func (*DenseBlock) RemoveEntrySet ¶
func (d *DenseBlock) RemoveEntrySet(entries []*LogEntry, bucket base.Bucket) (pendingRemoval []*LogEntry, err error)
Attempts to remove entries from the block
func (*DenseBlock) RollbackTo ¶
func (d *DenseBlock) RollbackTo(rollbackVbNo uint16, rollbackSeq uint64, bucket base.Bucket) (rollbackComplete bool, err error)
Removes all entries greater than vb, seq from the block. Returns rollbackComplete=true if it finds a seq in the block for the vbucket where seq <= rollbackSeq
type DenseBlockDataEntry ¶
type DenseBlockDataEntry []byte
DenseBlockDataEntry - a single doc entry within a block. Stores key, revId, and flags. Storage format:
| Name | Size | Description | |-----------|----------|----------------------------------| | flags | 1 byte | Flags (deleted, removed, etc) | | keylen | 2 bytes | Length of key | | key | n bytes | Key | | revid | n bytes | Revision id | ----------------------------------------------------------- We don't store rev id length - it's derived from the entryLen stored in the DenseBlockEntryIndex.
func NewDenseBlockDataEntry ¶
func NewDenseBlockDataEntry(docId, revId string, flags uint8) DenseBlockDataEntry
type DenseBlockEntry ¶
type DenseBlockEntry struct { DenseBlockIndexEntry DenseBlockDataEntry }
func (*DenseBlockEntry) MakeLogEntry ¶
func (d *DenseBlockEntry) MakeLogEntry() *LogEntry
func (*DenseBlockEntry) MakeLogEntryWithDocId ¶
func (d *DenseBlockEntry) MakeLogEntryWithDocId(docId string) *LogEntry
Used when the doc id has already been converted to string, to avoid performance overhead
type DenseBlockIndexEntry ¶
type DenseBlockIndexEntry []byte
DenseBlockIndexEntry is a helper class for interacting with entries in the index portion of a DenseBlock. NewDenseBlockIndexEntry(...) should be used for creating new entries; work with existing entries by targeting a slice within the DenseBlock.
| Name | Size | Description | |-----------|----------|----------------------------------| | vbno | 2 bytes | Vbucket number | | sequence | 8 bytes | Vbucket seq | | entryLen | 2 bytes | Length of associated block entry | -----------------------------------------------------------
func NewDenseBlockIndexEntry ¶
func NewDenseBlockIndexEntry(vbno uint16, sequence uint64, entryLen uint16) DenseBlockIndexEntry
type DenseBlockIterator ¶
type DenseBlockIterator struct {
// contains filtered or unexported fields
}
DenseBlockIterator - manages iteration over the contents of a block by storing pointer to index and entry locations
func NewDenseBlockIterator ¶
func NewDenseBlockIterator(block *DenseBlock) *DenseBlockIterator
type DenseBlockList ¶
type DenseBlockList struct {
// contains filtered or unexported fields
}
DenseBlockList is an ordered list of DenseBlockListEntries keys. Each key is associated with the starting clock for that DenseBlock. The list is persisted into one or more documents (DenseBlockListStorage) in the index. The active list has key activeKey - older lists are rotated into activeKey_n
func NewDenseBlockList ¶
func NewDenseBlockList(channelName string, partition uint16, indexBucket base.Bucket) *DenseBlockList
func NewDenseBlockListReader ¶
func NewDenseBlockListReader(channelName string, partition uint16, indexBucket base.Bucket) *DenseBlockList
func (*DenseBlockList) ActiveListEntry ¶
func (l *DenseBlockList) ActiveListEntry() *DenseBlockListEntry
func (*DenseBlockList) AddBlock ¶
func (l *DenseBlockList) AddBlock() (*DenseBlock, error)
Creates a new block, and adds to the block list
func (*DenseBlockList) GetActiveBlock ¶
func (l *DenseBlockList) GetActiveBlock() *DenseBlock
func (*DenseBlockList) LoadBlock ¶
func (l *DenseBlockList) LoadBlock(listEntry DenseBlockListEntry) *DenseBlock
func (*DenseBlockList) LoadPrevious ¶
func (l *DenseBlockList) LoadPrevious() error
LoadPrevious loads the previous DenseBlockList storage document, and:
- prepends the blocks in that DenseBlockList to the block set (l.blocks)
- shifts the activeStartIndex based on the modified list
- updates validFromCounter
func (*DenseBlockList) PreviousBlock ¶
func (l *DenseBlockList) PreviousBlock(currentBlockIndex int) (*DenseBlockListEntry, error)
Returns the block preceding the specified block index in the list. Loads earlier block lists if needed. Returns error if currentBlockIndex not found in list. Returns nil if currentBlockIndex is the first block in the list.
func (*DenseBlockList) ReloadDenseBlockList ¶
func (l *DenseBlockList) ReloadDenseBlockList() (found bool, err error)
func (*DenseBlockList) ValidFrom ¶
func (l *DenseBlockList) ValidFrom() base.PartitionClock
ValidFrom returns the starting clock of the first block in the list.
type DenseBlockListEntry ¶
type DenseBlockListEntry struct { BlockIndex int `json:"index"` // Dense Block index StartClock base.PartitionClock `json:"clock"` // Starting clock for Dense Block // contains filtered or unexported fields }
func (*DenseBlockListEntry) Key ¶
func (e *DenseBlockListEntry) Key(parentList *DenseBlockList) string
type DenseBlockListStorage ¶
type DenseBlockListStorage struct { Counter uint32 `json:"counter"` Blocks []DenseBlockListEntry `json:"blocks"` }
type DensePartitionStorageReader ¶
type DensePartitionStorageReader struct {
// contains filtered or unexported fields
}
func NewDensePartitionStorageReader ¶
func NewDensePartitionStorageReader(channelName string, partitionNo uint16, indexBucket base.Bucket) *DensePartitionStorageReader
func (*DensePartitionStorageReader) GetChanges ¶
func (pr *DensePartitionStorageReader) GetChanges(partitionRange base.PartitionRange) (*PartitionChanges, error)
GetChanges attempts to return results from the cached changes. If the cache doesn't satisfy the specified range, retrieves from the index. Note: currently no writeback of indexed retrieval into the cache - cache is only updated during UpdateCache()
func (*DensePartitionStorageReader) UpdateCache ¶
func (pr *DensePartitionStorageReader) UpdateCache(numBlocks int) error
type DensePartitionStorageReaderNonCaching ¶
type DensePartitionStorageReaderNonCaching struct {
// contains filtered or unexported fields
}
DensePartitionStorageReaderNonCaching is a non-caching reader - every read request retrieves the latest from the bucket.
func NewDensePartitionStorageReaderNonCaching ¶
func NewDensePartitionStorageReaderNonCaching(channelName string, partitionNo uint16, indexBucket base.Bucket) *DensePartitionStorageReaderNonCaching
func (*DensePartitionStorageReaderNonCaching) GetBlockListForRange ¶
func (r *DensePartitionStorageReaderNonCaching) GetBlockListForRange(partitionRange base.PartitionRange) *DenseBlockList
func (*DensePartitionStorageReaderNonCaching) GetChanges ¶
func (r *DensePartitionStorageReaderNonCaching) GetChanges(partitionRange base.PartitionRange) (*PartitionChanges, error)
type DenseStorageReader ¶
type DenseStorageReader struct {
// contains filtered or unexported fields
}
Implementation of ChannelStorage that stores entries as an append-based list of full log entries
func NewDenseStorageReader ¶
func NewDenseStorageReader(bucket base.Bucket, channelName string, partitions *base.IndexPartitions) *DenseStorageReader
func (*DenseStorageReader) GetChanges ¶
func (ds *DenseStorageReader) GetChanges(sinceClock base.SequenceClock, toClock base.SequenceClock, limit int) (changes []*LogEntry, err error)
func (*DenseStorageReader) UpdateCache ¶
func (ds *DenseStorageReader) UpdateCache(sinceClock base.SequenceClock, toClock base.SequenceClock, changedPartitions []*base.PartitionRange) error
type DocChangedFunc ¶
type DocumentChangeEvent ¶
type DocumentChangeEvent struct { AsyncEvent Doc Body OldDoc string Channels base.Set }
DocumentChangeEvent is raised when a document has been successfully written to the backing data store. Event has the document body and channel set as properties.
func (*DocumentChangeEvent) EventType ¶
func (dce *DocumentChangeEvent) EventType() EventType
func (*DocumentChangeEvent) String ¶
func (dce *DocumentChangeEvent) String() string
type EventHandler ¶
EventHandler interface represents an instance of an event handler defined in the database config
type EventManager ¶
type EventManager struct {
// contains filtered or unexported fields
}
EventManager routes raised events to corresponding event handlers. Incoming events are just dumped in the eventChannel to minimize time spent blocking whatever process is raising the event. The event queue worker goroutine works the event channel and sends events to the appropriate handlers
func NewEventManager ¶
func NewEventManager() *EventManager
Creates a new event manager. Sets up the event channel for async events, and the goroutine to monitor and process that channel.
func (*EventManager) HasHandlerForEvent ¶
func (em *EventManager) HasHandlerForEvent(eventType EventType) bool
Checks whether a handler of the given type has been registered to the event manager.
func (*EventManager) ProcessEvent ¶
func (em *EventManager) ProcessEvent(event Event)
Concurrent processing of all async event handlers registered for the event type
func (*EventManager) RaiseDBStateChangeEvent ¶
func (em *EventManager) RaiseDBStateChangeEvent(dbName string, state string, reason string, adminInterface string) error
Raises a DB state change event based on the db name, admin interface, new state, reason and local system time. If the event manager doesn't have a listener for this event, ignores.
func (*EventManager) RaiseDocumentChangeEvent ¶
func (em *EventManager) RaiseDocumentChangeEvent(body Body, oldBodyJSON string, channels base.Set) error
Raises a document change event based on the document body and channel set. If the event manager doesn't have a listener for this event, ignores.
func (*EventManager) RegisterEventHandler ¶
func (em *EventManager) RegisterEventHandler(handler EventHandler, eventType EventType)
Register a new event handler to the EventManager. The event manager will route events of type eventType to the handler.
func (*EventManager) Start ¶
func (em *EventManager) Start(maxProcesses uint, waitTime int)
Starts the listener queue for the event manager
type ForEachDocIDFunc ¶
type ForEachDocIDOptions ¶
The ForEachDocID options for limiting query results
type IndexBlock ¶
type IndexBlock interface { AddEntry(entry *LogEntry) error Key() string Marshal() ([]byte, error) Unmarshal(value []byte) error Cas() uint64 SetCas(cas uint64) GetEntries(vbNo uint16, fromSeq uint64, toSeq uint64, includeKeys bool) (entries []*LogEntry, keySet []string) GetAllEntries() []*LogEntry }
IndexBlock interface - defines interactions with a block
func NewIndexBlock ¶
func NewIndexBlock(channelName string, sequence uint64, partition uint16, partitions *base.IndexPartitions) IndexBlock
type IndexPartitionsFunc ¶
type IndexPartitionsFunc func() (*base.IndexPartitions, error)
type IndexStats ¶
type IndexStats struct {
PartitionStats PartitionStats `json:"partitions"`
}
type JSEventFunction ¶
A thread-safe wrapper around a jsEventTask, i.e. an event function.
func NewJSEventFunction ¶
func NewJSEventFunction(fnSource string) *JSEventFunction
func (*JSEventFunction) CallFunction ¶
func (ef *JSEventFunction) CallFunction(event Event) (interface{}, error)
Calls a jsEventFunction returning an interface{}
func (*JSEventFunction) CallValidateFunction ¶
func (ef *JSEventFunction) CallValidateFunction(event Event) (bool, error)
Calls a jsEventFunction returning bool.
type KvChannelIndex ¶
type KvChannelIndex struct {
// contains filtered or unexported fields
}
KvChannelIndex manages read requests from Sync Gateway to the bucket-based channel index, for a given channel. Supports one-shot requests, as well as polling to support notification for continuous/longpoll requests. KvChangeIndex maintains a collection of KvChannelIndex instances for active channels.
func NewKvChannelIndex ¶
func NewKvChannelIndex(channelName string, bucket base.Bucket, partitions *base.IndexPartitions, onChangeCallback func(base.Set)) *KvChannelIndex
func (*KvChannelIndex) GetChanges ¶
func (k *KvChannelIndex) GetChanges(sinceClock base.SequenceClock, toClock base.SequenceClock, limit int) ([]*LogEntry, error)
Returns the set of index entries for the channel more recent than the specified since SequenceClock. Index entries with sequence values greater than the index stable sequence are not returned.
func (*KvChannelIndex) IndexBucket ¶
func (k *KvChannelIndex) IndexBucket() base.Bucket
type LogEntries ¶
type LogEntries []*LogEntry
type LogEntry ¶
func (*LogEntry) SetRemoved ¶
func (entry *LogEntry) SetRemoved()
type LogPriorityQueue ¶
type LogPriorityQueue []*LogEntry
A priority-queue of LogEntries, kept ordered by increasing sequence #.
func (LogPriorityQueue) Len ¶
func (h LogPriorityQueue) Len() int
func (LogPriorityQueue) Less ¶
func (h LogPriorityQueue) Less(i, j int) bool
func (*LogPriorityQueue) Pop ¶
func (h *LogPriorityQueue) Pop() interface{}
func (*LogPriorityQueue) Push ¶
func (h *LogPriorityQueue) Push(x interface{})
func (LogPriorityQueue) Swap ¶
func (h LogPriorityQueue) Swap(i, j int)
type OidcTestProviderOptions ¶
type OidcTestProviderOptions struct { Enabled bool `json:"enabled,omitempty"` // Whether the oidc_test_provider endpoints should be exposed on the public API UnsignedIDToken bool `json:"unsigned_id_token,omitempty"` // Whether the internal test provider returns a signed ID token on a refresh request. Used to simulate Azure behaviour }
type PartitionChanges ¶
type PartitionChanges struct {
// contains filtered or unexported fields
}
func NewPartitionChanges ¶
func NewPartitionChanges() *PartitionChanges
func (*PartitionChanges) AddEntry ¶
func (p *PartitionChanges) AddEntry(entry *LogEntry)
func (*PartitionChanges) Count ¶
func (p *PartitionChanges) Count() int
func (*PartitionChanges) GetVbChanges ¶
func (p *PartitionChanges) GetVbChanges(vbNo uint16) []*LogEntry
func (*PartitionChanges) PrependChanges ¶
func (p *PartitionChanges) PrependChanges(other *PartitionChanges)
type PartitionMapStats ¶
type PartitionMapStats struct {
Storage base.PartitionStorageSet `json:"partitions"`
}
type PartitionStats ¶
type PartitionStats struct { PartitionMap PartitionMapStats `json:"index_partitions"` CBGTMap PartitionMapStats `json:"cbgt_partitions"` PartitionsMatch bool `json:"matches"` }
type PrincipalConfig ¶
type PrincipalConfig struct { Name *string `json:"name,omitempty"` ExplicitChannels base.Set `json:"admin_channels,omitempty"` Channels base.Set `json:"all_channels"` // Fields below only apply to Users, not Roles: Email string `json:"email,omitempty"` Disabled bool `json:"disabled,omitempty"` Password *string `json:"password,omitempty"` ExplicitRoleNames []string `json:"admin_roles,omitempty"` RoleNames []string `json:"roles,omitempty"` }
Struct that configures settings of a User/Role, for UpdatePrincipal. Also used in the rest package as a JSON object that defines a User/Role within a DbConfig and structures the request/response body in the admin REST API for /db/_user/*
func (PrincipalConfig) IsPasswordValid ¶
func (p PrincipalConfig) IsPasswordValid(allowEmptyPass bool) (isValid bool, reason string)
Check if the password in this PrincipalConfig is valid. Only allow empty passwords if allowEmptyPass is true.
type ResponseType ¶
type ResponseType uint8
const ( StringResponse ResponseType = iota JSObjectResponse )
type RevTree ¶
A revision tree maps each revision ID to its RevInfo.
func (RevTree) MarshalJSON ¶
func (RevTree) UnmarshalJSON ¶
type RevisionCache ¶
type RevisionCache struct {
// contains filtered or unexported fields
}
An LRU cache of document revision bodies, together with their channel access.
func NewRevisionCache ¶
func NewRevisionCache(capacity int, loaderFunc RevisionCacheLoaderFunc) *RevisionCache
Creates a revision cache with the given capacity and an optional loader function.
func (*RevisionCache) Get ¶
Looks up a revision from the cache. Returns the body of the revision, its history, and the set of channels it's in. If the cache has a loaderFunction, it will be called if the revision isn't in the cache; any error returned by the loaderFunction will be returned from Get.
type RevisionCacheLoaderFunc ¶
type SeqHashCacheLoaderFunc ¶
type SequenceHashOptions ¶
type SequenceID ¶
type SequenceID struct { SeqType SequenceType // Sequence Type (Int or Clock) TriggeredBy uint64 // Int sequence: The sequence # that triggered this (0 if none) LowSeq uint64 // Int sequence: Lowest contiguous sequence seen on the feed Seq uint64 // Int sequence: The actual internal sequence Clock base.SequenceClock // Clock sequence: Sequence (distributed index) TriggeredByClock base.SequenceClock // Clock sequence: Sequence (distributed index) that triggered this ClockHash string // String representation of clock hash SequenceHasher *sequenceHasher // Sequence hasher - used when unmarshalling clock-based sequences TriggeredByVbNo uint16 // Vbucket number for triggered by sequence LowHash string // Clock hash used for continuous feed where some entries aren't hashed // contains filtered or unexported fields }
SequenceID doesn't do any clock hash management - it's expected that hashing has already been done (if required) when the clock is set.
func ParseSequenceIDFromJSON ¶
func ParseSequenceIDFromJSON(data []byte) (SequenceID, error)
func (SequenceID) Before ¶
func (s SequenceID) Before(s2 SequenceID) bool
The most significant value is TriggeredBy, unless it's zero, in which case use Seq. The tricky part is that "n" sorts after "n:m" for any nonzero m
func (SequenceID) Equals ¶
func (s SequenceID) Equals(s2 SequenceID) bool
Equality of sequences, based on seq, triggered by and low hash
func (SequenceID) IsNonZero ¶
func (s SequenceID) IsNonZero() bool
func (SequenceID) MarshalJSON ¶
func (s SequenceID) MarshalJSON() ([]byte, error)
func (SequenceID) SafeSequence ¶
func (s SequenceID) SafeSequence() uint64
func (SequenceID) String ¶
func (s SequenceID) String() string
Format sequence ID to send to clients. Sequence IDs can be in one of the following formats:
Seq - simple sequence TriggeredBy:Seq - when TriggeredBy is non-zero, LowSeq is zero LowSeq:TriggeredBy:Seq - when LowSeq is non-zero.
When LowSeq is non-zero but TriggeredBy is zero, will appear as LowSeq::Seq. When LowSeq is non-zero but is greater than s.Seq (occurs when sending previously skipped sequences), ignore LowSeq.
func (*SequenceID) UnmarshalJSON ¶
func (s *SequenceID) UnmarshalJSON(data []byte) error
func (SequenceID) VbucketSequenceAfter ¶
func (s SequenceID) VbucketSequenceAfter(vbNo uint16, seq uint64) bool
func (SequenceID) VbucketSequenceBefore ¶
func (s SequenceID) VbucketSequenceBefore(vbNo uint16, seq uint64) bool
type SequenceType ¶
type SequenceType int
type Shadower ¶
type Shadower struct {
// contains filtered or unexported fields
}
Bidirectional sync with an external Couchbase bucket. Watches the bucket's tap feed and applies changes to the matching managed document. Accepts local change notifications and makes equivalent changes to the external bucket. See: https://github.com/tophatch/sync_gateway/wiki/Bucket-Shadowing
func NewShadower ¶
func NewShadower(context *DatabaseContext, bucket base.Bucket, docIDPattern *regexp.Regexp) (*Shadower, error)
Creates a new Shadower.
func (*Shadower) PushRevision ¶
func (s *Shadower) PushRevision(doc *document)
Saves a new local revision to the external bucket.
type SkippedSequence ¶
type SkippedSequence struct {
// contains filtered or unexported fields
}
type SkippedSequenceQueue ¶
type SkippedSequenceQueue []*SkippedSequence
An ordered queue that supports the Remove operation
func (SkippedSequenceQueue) Contains ¶
func (h SkippedSequenceQueue) Contains(x uint64) bool
Contains does a simple search to detect presence
func (*SkippedSequenceQueue) Push ¶
func (h *SkippedSequenceQueue) Push(x *SkippedSequence) error
We always know that incoming missed sequence numbers will be larger than any previously added, so we don't need to do any sorting - just append to the slice
func (*SkippedSequenceQueue) Remove ¶
func (h *SkippedSequenceQueue) Remove(x uint64) error
Remove does a simple binary search to find and remove.
type SortedEntrySet ¶
type SortedEntrySet []*LogEntry
SortedEntrySet optimizes removal of entries from a sorted array.
func (*SortedEntrySet) Remove ¶
func (h *SortedEntrySet) Remove(x uint64) error
type Statistics ¶
type Statistics struct {
// contains filtered or unexported fields
}
Tracks usage count of a resource, such as the _changes feed. (Thread-safe.)
func (*Statistics) Decrement ¶
func (stats *Statistics) Decrement()
func (*Statistics) Increment ¶
func (stats *Statistics) Increment()
func (*Statistics) MaxCount ¶
func (stats *Statistics) MaxCount() uint32
func (*Statistics) Reset ¶
func (stats *Statistics) Reset()
func (*Statistics) TotalCount ¶
func (stats *Statistics) TotalCount() uint32
type UnsupportedOptions ¶
type UnsupportedOptions struct { UserViews UserViewsOptions `json:"user_views,omitempty"` // Config settings for user views Replicator2 bool `json:"replicator_2,omitempty"` // Enable new replicator (_blipsync) OidcTestProvider OidcTestProviderOptions `json:"oidc_test_provider,omitempty"` // Config settings for OIDC Provider EnableXattr *bool `json:"enable_extended_attributes"` // Use xattr for _sync }
type UserAccessMap ¶
Maps what users have access to what channels or roles, and when they got that access.
type UserViewsOptions ¶
type UserViewsOptions struct {
Enabled *bool `json:"enabled,omitempty"` // Whether pass-through view query is supported through public API
}
type ViewDoc ¶
type ViewDoc struct {
Json json.RawMessage // should be type 'document', but that fails to unmarshal correctly
}
type Webhook ¶
type Webhook struct { AsyncEventHandler // contains filtered or unexported fields }
Webhook is an implementation of EventHandler that sends an asynchronous HTTP POST
func NewWebhook ¶
Creates a new webhook handler based on the url and filter function.
func (*Webhook) HandleEvent ¶
Performs an HTTP POST to the url defined for the handler. If a filter function is defined, calls it to determine whether to POST. The payload for the POST depends on the event type.
func (*Webhook) SanitizedUrl ¶
Source Files ¶
- assimilator.go
- attachment.go
- change_cache.go
- change_index.go
- change_listener.go
- changes.go
- changes_view.go
- channel_cache.go
- crud.go
- database.go
- design_doc.go
- document.go
- event.go
- event_handler.go
- event_manager.go
- index_changes.go
- kv_change_index.go
- kv_change_index_reader.go
- kv_channel_index.go
- kv_channel_storage.go
- kv_dense_block.go
- kv_dense_channel_storage.go
- kv_dense_channel_storage_reader.go
- revision.go
- revision_cache.go
- revtree.go
- sequence_allocator.go
- sequence_hasher.go
- sequence_id.go
- shadower.go
- special_docs.go
- statistics.go
- users.go