gitbase: use DotGit for managing pack objects #302

Closed · wants to merge 5 commits
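For context (not part of this change): below is a minimal sketch of how go-git's DotGit type can enumerate packfiles and read an object at a known pack offset, which is the primitive the new index iterators in this diff rely on. The import paths, the packfile.NewDecoder/DecodeObjectAt signatures, and the repository path are assumptions based on the go-git v4 revision pinned in Gopkg.toml; the packfile decoder API has changed across go-git versions.

// Sketch only: not part of this PR. Assumes the go-git v4 dotgit and packfile
// APIs around the revision pinned below; paths and the offset are placeholders.
package main

import (
    "fmt"

    "gopkg.in/src-d/go-billy.v4/osfs"
    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
    "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
    "gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
    // Point DotGit at a repository's .git directory (path is hypothetical).
    dg := dotgit.New(osfs.New("/path/to/repo/.git"))

    // List the hashes of the packfiles under .git/objects/pack.
    packs, err := dg.ObjectPacks()
    if err != nil {
        panic(err)
    }

    for _, hash := range packs {
        // Open the packfile itself; DotGit returns a billy.File.
        f, err := dg.ObjectPack(hash)
        if err != nil {
            panic(err)
        }

        // Build a packfile decoder over the open pack. The in-memory storer is
        // only needed to satisfy the decoder's interface in this sketch.
        d, err := packfile.NewDecoder(packfile.NewScanner(f), memory.NewStorage())
        if err != nil {
            panic(err)
        }

        // Decode a single object at a known byte offset. Offset 12 (just past
        // the pack header) is illustrative; in gitbase the offset comes from an
        // index key.
        if obj, err := d.DecodeObjectAt(12); err == nil {
            fmt.Println(hash, obj.Type(), obj.Hash())
        }

        _ = f.Close()
    }
}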
7 changes: 3 additions & 4 deletions Gopkg.lock


1 change: 1 addition & 0 deletions Gopkg.toml
@@ -17,6 +17,7 @@
[[constraint]]
name = "gopkg.in/src-d/go-git.v4"
source = "github.com/src-d/go-git"
revision = "0710c6cb710a0cdab04ab7f61cc62e23cfcacbee"

[[constraint]]
name = "gopkg.in/src-d/go-git-fixtures.v3"
170 changes: 152 additions & 18 deletions blobs.go
@@ -7,6 +7,7 @@ import (

"gopkg.in/src-d/go-mysql-server.v0/sql"
"gopkg.in/src-d/go-mysql-server.v0/sql/expression"
"gopkg.in/src-d/go-mysql-server.v0/sql/plan"

"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/object"
@@ -39,14 +40,13 @@ var BlobsSchema = sql.Schema{
var _ sql.PushdownProjectionAndFiltersTable = (*blobsTable)(nil)

func newBlobsTable() Indexable {
return &indexableTable{
PushdownTable: new(blobsTable),
buildIterWithSelectors: blobsIterBuilder,
}
return new(blobsTable)
}

var _ Table = (*blobsTable)(nil)
var _ Squashable = (*blobsTable)(nil)

func (blobsTable) isSquashable() {}
func (blobsTable) isGitbaseTable() {}

func (blobsTable) String() string {
@@ -118,6 +118,50 @@ func (r *blobsTable) WithProjectAndFilters(
return sql.NewSpanIter(span, iter), nil
}

// IndexKeyValueIter implements the sql.Indexable interface.
func (*blobsTable) IndexKeyValueIter(
ctx *sql.Context,
colNames []string,
) (sql.IndexKeyValueIter, error) {
s, ok := ctx.Session.(*Session)
if !ok || s == nil {
return nil, ErrInvalidGitbaseSession.New(ctx.Session)
}

return newBlobsKeyValueIter(s.Pool, colNames), nil
}

// WithProjectFiltersAndIndex implements the sql.Indexable interface.
func (*blobsTable) WithProjectFiltersAndIndex(
ctx *sql.Context,
columns, filters []sql.Expression,
index sql.IndexValueIter,
) (sql.RowIter, error) {
span, ctx := ctx.Span("gitbase.BlobsTable.WithProjectFiltersAndIndex")
s, ok := ctx.Session.(*Session)
if !ok || s == nil {
span.Finish()
return nil, ErrInvalidGitbaseSession.New(ctx.Session)
}

session, err := getSession(ctx)
if err != nil {
return nil, err
}

var iter sql.RowIter = &blobsIndexIter{
index: index,
pool: session.Pool,
readContent: shouldReadContent(columns),
}

if len(filters) > 0 {
iter = plan.NewFilterIter(ctx, expression.JoinAnd(filters...), iter)
}

return sql.NewSpanIter(span, iter), nil
}

func blobsIterBuilder(_ *sql.Context, selectors selectors, columns []sql.Expression) (RowRepoIter, error) {
if len(selectors["blob_hash"]) == 0 {
return &blobIter{readContent: shouldReadContent(columns)}, nil
@@ -138,7 +182,6 @@ type blobIter struct {
repoID string
iter *object.BlobIter
readContent bool
lastHash string
}

func (i *blobIter) NewIterator(repo *Repository) (RowRepoIter, error) {
@@ -150,17 +193,11 @@ func (i *blobIter) NewIterator(repo *Repository) (RowRepoIter, error) {
return &blobIter{repoID: repo.ID, iter: iter, readContent: i.readContent}, nil
}

func (i *blobIter) Repository() string { return i.repoID }

func (i *blobIter) LastObject() string { return i.lastHash }

func (i *blobIter) Next() (sql.Row, error) {
o, err := i.iter.Next()
if err != nil {
return nil, err
}

i.lastHash = o.Hash.String()
return blobToRow(i.repoID, o, i.readContent)
}

@@ -177,17 +214,12 @@ type blobsByHashIter struct {
pos int
hashes []string
readContent bool
lastHash string
}

func (i *blobsByHashIter) NewIterator(repo *Repository) (RowRepoIter, error) {
return &blobsByHashIter{repo, 0, i.hashes, i.readContent, ""}, nil
return &blobsByHashIter{repo, 0, i.hashes, i.readContent}, nil
}

func (i *blobsByHashIter) Repository() string { return i.repo.ID }

func (i *blobsByHashIter) LastObject() string { return i.lastHash }

func (i *blobsByHashIter) Next() (sql.Row, error) {
for {
if i.pos >= len(i.hashes) {
@@ -205,7 +237,6 @@ func (i *blobsByHashIter) Next() (sql.Row, error) {
return nil, err
}

i.lastHash = hash.String()
return blobToRow(i.repo.ID, blob, i.readContent)
}
}
@@ -304,3 +335,106 @@ func shouldReadContent(columns []sql.Expression) bool {
}
return false
}

type blobsKeyValueIter struct {
iter *objectIter
columns []string
}

func newBlobsKeyValueIter(pool *RepositoryPool, columns []string) *blobsKeyValueIter {
return &blobsKeyValueIter{
iter: newObjectIter(pool, plumbing.BlobObject),
columns: columns,
}
}

func (i *blobsKeyValueIter) Next() ([]interface{}, []byte, error) {
obj, err := i.iter.Next()
if err != nil {
return nil, nil, err
}

key, err := encodeIndexKey(packOffsetIndexKey{
Repository: obj.RepositoryID,
Packfile: obj.Packfile.String(),
Offset: int64(obj.Offset),
})
if err != nil {
return nil, nil, err
}

blob, ok := obj.Object.(*object.Blob)
if !ok {
return nil, nil, ErrInvalidObjectType.New(obj.Object, "*object.Blob")
}

row, err := blobToRow(obj.RepositoryID, blob, stringContains(i.columns, "blob_content"))
if err != nil {
return nil, nil, err
}

values, err := rowIndexValues(row, i.columns, BlobsSchema)
if err != nil {
return nil, nil, err
}

return values, key, nil
}

func (i *blobsKeyValueIter) Close() error { return i.iter.Close() }
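The values produced by blobsKeyValueIter are keyed by a (repository, packfile, offset) triple, so a later index lookup can reopen the right packfile and decode just one object. A hypothetical round trip of such a key is sketched below, assuming a gob-based serialization; gitbase's actual encodeIndexKey/decodeIndexKey helpers may encode differently.

// Hypothetical illustration of the index-key round trip; the real
// encodeIndexKey/decodeIndexKey in gitbase may use another serialization.
package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
)

// Mirrors the fields used by blobsKeyValueIter: enough to find the object again.
type packOffsetIndexKey struct {
    Repository string // repository identifier
    Packfile   string // packfile hash, as produced by plumbing.Hash.String()
    Offset     int64  // byte offset of the object inside the packfile
}

func encodeKey(k packOffsetIndexKey) ([]byte, error) {
    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(k); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}

func decodeKey(data []byte, k *packOffsetIndexKey) error {
    return gob.NewDecoder(bytes.NewReader(data)).Decode(k)
}

func main() {
    key := packOffsetIndexKey{Repository: "repo-1", Packfile: "0123abcd", Offset: 42}

    data, err := encodeKey(key)
    if err != nil {
        panic(err)
    }

    var decoded packOffsetIndexKey
    if err := decodeKey(data, &decoded); err != nil {
        panic(err)
    }

    // The decoded key is enough to reopen the named packfile and seek to the object.
    fmt.Printf("%+v\n", decoded)
}

Keying the index by pack offset avoids re-enumerating objects on lookup: blobsIndexIter below only has to open the packfile named in the key and decode the object at that offset.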

type blobsIndexIter struct {
index sql.IndexValueIter
pool *RepositoryPool
decoder *objectDecoder
readContent bool
}

func (i *blobsIndexIter) Next() (sql.Row, error) {
data, err := i.index.Next()
if err != nil {
return nil, err
}

var key packOffsetIndexKey
if err := decodeIndexKey(data, &key); err != nil {
return nil, err
}

packfile := plumbing.NewHash(key.Packfile)
if i.decoder == nil || !i.decoder.equals(key.Repository, packfile) {
if i.decoder != nil {
if err := i.decoder.Close(); err != nil {
return nil, err
}
}

i.decoder, err = newObjectDecoder(i.pool.repositories[key.Repository], packfile)
if err != nil {
return nil, err
}
}

obj, err := i.decoder.get(key.Offset)
if err != nil {
return nil, err
}

blob, ok := obj.(*object.Blob)
if !ok {
return nil, ErrInvalidObjectType.New(obj, "*object.Blob")
}

return blobToRow(key.Repository, blob, i.readContent)
}

func (i *blobsIndexIter) Close() error {
if i.decoder != nil {
if err := i.decoder.Close(); err != nil {
_ = i.index.Close()
return err
}
}

return i.index.Close()
}