Implement Milestone 1
This commit is contained in:
201
internal/storage/oplog_reader.go
Normal file
201
internal/storage/oplog_reader.go
Normal file
@ -0,0 +1,201 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"git.dws.rip/DWS/onyx/internal/models"
|
||||
)
|
||||
|
||||
// OplogReader handles reading entries from the oplog file.
// It holds no open file handle: every read method opens the file at
// path fresh and closes it before returning.
type OplogReader struct {
	path string // filesystem path to the oplog file
}
|
||||
|
||||
// NewOplogReader creates a new oplog reader for the given file path
|
||||
func NewOplogReader(path string) *OplogReader {
|
||||
return &OplogReader{
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
// ReadLastEntry reads the last (most recent) entry in the oplog
|
||||
func (r *OplogReader) ReadLastEntry() (*models.OplogEntry, error) {
|
||||
file, err := os.Open(r.path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open oplog file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var lastEntry *models.OplogEntry
|
||||
|
||||
// Read through all entries to find the last one
|
||||
for {
|
||||
// Read entry length (4 bytes)
|
||||
var entryLen uint32
|
||||
err := binary.Read(file, binary.LittleEndian, &entryLen)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read entry length: %w", err)
|
||||
}
|
||||
|
||||
// Read the entry data
|
||||
entryData := make([]byte, entryLen)
|
||||
n, err := file.Read(entryData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read entry data: %w", err)
|
||||
}
|
||||
if n != int(entryLen) {
|
||||
return nil, fmt.Errorf("incomplete entry data read: expected %d bytes, got %d", entryLen, n)
|
||||
}
|
||||
|
||||
// Deserialize the entry
|
||||
entry, err := models.DeserializeOplogEntry(entryData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to deserialize entry: %w", err)
|
||||
}
|
||||
|
||||
lastEntry = entry
|
||||
}
|
||||
|
||||
if lastEntry == nil {
|
||||
return nil, fmt.Errorf("oplog is empty")
|
||||
}
|
||||
|
||||
return lastEntry, nil
|
||||
}
|
||||
|
||||
// ReadEntry reads a specific entry by ID
|
||||
func (r *OplogReader) ReadEntry(id uint64) (*models.OplogEntry, error) {
|
||||
file, err := os.Open(r.path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open oplog file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Read through all entries to find the one with matching ID
|
||||
for {
|
||||
// Read entry length (4 bytes)
|
||||
var entryLen uint32
|
||||
err := binary.Read(file, binary.LittleEndian, &entryLen)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read entry length: %w", err)
|
||||
}
|
||||
|
||||
// Read the entry data
|
||||
entryData := make([]byte, entryLen)
|
||||
n, err := file.Read(entryData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read entry data: %w", err)
|
||||
}
|
||||
if n != int(entryLen) {
|
||||
return nil, fmt.Errorf("incomplete entry data read: expected %d bytes, got %d", entryLen, n)
|
||||
}
|
||||
|
||||
// Deserialize the entry
|
||||
entry, err := models.DeserializeOplogEntry(entryData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to deserialize entry: %w", err)
|
||||
}
|
||||
|
||||
if entry.ID == id {
|
||||
return entry, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("entry with ID %d not found", id)
|
||||
}
|
||||
|
||||
// GetUndoStack returns a stack of entries that can be undone (in reverse order)
|
||||
func (r *OplogReader) GetUndoStack() ([]*models.OplogEntry, error) {
|
||||
entries, err := r.ReadAllEntries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Filter out entries that have already been undone
|
||||
// For now, we return all entries in reverse order
|
||||
// In the future, we might track undone entries separately
|
||||
var undoStack []*models.OplogEntry
|
||||
for i := len(entries) - 1; i >= 0; i-- {
|
||||
undoStack = append(undoStack, entries[i])
|
||||
}
|
||||
|
||||
return undoStack, nil
|
||||
}
|
||||
|
||||
// ReadAllEntries reads all entries from the oplog in order
|
||||
func (r *OplogReader) ReadAllEntries() ([]*models.OplogEntry, error) {
|
||||
file, err := os.Open(r.path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open oplog file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var entries []*models.OplogEntry
|
||||
|
||||
// Read through all entries
|
||||
for {
|
||||
// Read entry length (4 bytes)
|
||||
var entryLen uint32
|
||||
err := binary.Read(file, binary.LittleEndian, &entryLen)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read entry length: %w", err)
|
||||
}
|
||||
|
||||
// Read the entry data
|
||||
entryData := make([]byte, entryLen)
|
||||
n, err := file.Read(entryData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read entry data: %w", err)
|
||||
}
|
||||
if n != int(entryLen) {
|
||||
return nil, fmt.Errorf("incomplete entry data read: expected %d bytes, got %d", entryLen, n)
|
||||
}
|
||||
|
||||
// Deserialize the entry
|
||||
entry, err := models.DeserializeOplogEntry(entryData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to deserialize entry: %w", err)
|
||||
}
|
||||
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Count returns the total number of entries in the oplog
|
||||
func (r *OplogReader) Count() (int, error) {
|
||||
entries, err := r.ReadAllEntries()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(entries), nil
|
||||
}
|
||||
|
||||
// IsEmpty checks if the oplog is empty
|
||||
func (r *OplogReader) IsEmpty() (bool, error) {
|
||||
file, err := os.Open(r.path)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to open oplog file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to stat file: %w", err)
|
||||
}
|
||||
|
||||
return stat.Size() == 0, nil
|
||||
}
|
163
internal/storage/oplog_writer.go
Normal file
163
internal/storage/oplog_writer.go
Normal file
@ -0,0 +1,163 @@
|
||||
package storage
|
||||
|
||||
import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"sync"

	"git.dws.rip/DWS/onyx/internal/models"
)
|
||||
|
||||
// OplogWriter handles writing entries to the oplog file.
// All public methods take mu, so a single writer may be shared between
// goroutines.
type OplogWriter struct {
	path     string     // filesystem path to the oplog file
	file     *os.File   // open handle, positioned at end-of-file for appends
	mu       sync.Mutex // serializes appends, flushes, and close
	nextID   uint64     // next entry ID to assign (max existing ID + 1)
	isClosed bool       // set by Close; subsequent writes fail fast
}
|
||||
|
||||
// OpenOplog opens an existing oplog file or creates a new one
|
||||
func OpenOplog(path string) (*OplogWriter, error) {
|
||||
// Open file for append and read
|
||||
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open oplog file: %w", err)
|
||||
}
|
||||
|
||||
writer := &OplogWriter{
|
||||
path: path,
|
||||
file: file,
|
||||
nextID: 1,
|
||||
}
|
||||
|
||||
// Calculate next ID by reading existing entries
|
||||
if err := writer.calculateNextID(); err != nil {
|
||||
file.Close()
|
||||
return nil, fmt.Errorf("failed to calculate next ID: %w", err)
|
||||
}
|
||||
|
||||
return writer, nil
|
||||
}
|
||||
|
||||
// calculateNextID scans the oplog to determine the next entry ID
|
||||
func (w *OplogWriter) calculateNextID() error {
|
||||
// Seek to the beginning
|
||||
if _, err := w.file.Seek(0, 0); err != nil {
|
||||
return fmt.Errorf("failed to seek to beginning: %w", err)
|
||||
}
|
||||
|
||||
var maxID uint64 = 0
|
||||
|
||||
// Read through all entries to find the max ID
|
||||
for {
|
||||
// Read entry length (4 bytes)
|
||||
var entryLen uint32
|
||||
err := binary.Read(w.file, binary.LittleEndian, &entryLen)
|
||||
if err != nil {
|
||||
// EOF is expected at the end
|
||||
if err.Error() == "EOF" {
|
||||
break
|
||||
}
|
||||
return fmt.Errorf("failed to read entry length: %w", err)
|
||||
}
|
||||
|
||||
// Read the entry data
|
||||
entryData := make([]byte, entryLen)
|
||||
n, err := w.file.Read(entryData)
|
||||
if err != nil || n != int(entryLen) {
|
||||
return fmt.Errorf("failed to read entry data: %w", err)
|
||||
}
|
||||
|
||||
// Deserialize to get the ID
|
||||
entry, err := models.DeserializeOplogEntry(entryData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to deserialize entry: %w", err)
|
||||
}
|
||||
|
||||
if entry.ID > maxID {
|
||||
maxID = entry.ID
|
||||
}
|
||||
}
|
||||
|
||||
w.nextID = maxID + 1
|
||||
|
||||
// Seek to the end for appending
|
||||
if _, err := w.file.Seek(0, 2); err != nil {
|
||||
return fmt.Errorf("failed to seek to end: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppendEntry appends a new entry to the oplog
|
||||
func (w *OplogWriter) AppendEntry(entry *models.OplogEntry) error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.isClosed {
|
||||
return fmt.Errorf("oplog writer is closed")
|
||||
}
|
||||
|
||||
// Assign ID if not set
|
||||
if entry.ID == 0 {
|
||||
entry.ID = w.nextID
|
||||
w.nextID++
|
||||
}
|
||||
|
||||
// Serialize the entry
|
||||
data, err := entry.Serialize()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to serialize entry: %w", err)
|
||||
}
|
||||
|
||||
// Write entry length (4 bytes) followed by entry data
|
||||
entryLen := uint32(len(data))
|
||||
if err := binary.Write(w.file, binary.LittleEndian, entryLen); err != nil {
|
||||
return fmt.Errorf("failed to write entry length: %w", err)
|
||||
}
|
||||
|
||||
if _, err := w.file.Write(data); err != nil {
|
||||
return fmt.Errorf("failed to write entry data: %w", err)
|
||||
}
|
||||
|
||||
// Sync to disk for durability
|
||||
if err := w.file.Sync(); err != nil {
|
||||
return fmt.Errorf("failed to sync file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNextID returns the next entry ID that will be assigned
|
||||
func (w *OplogWriter) GetNextID() uint64 {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.nextID
|
||||
}
|
||||
|
||||
// Close closes the oplog file
|
||||
func (w *OplogWriter) Close() error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.isClosed = true
|
||||
return w.file.Close()
|
||||
}
|
||||
|
||||
// Flush ensures all buffered data is written to disk
|
||||
func (w *OplogWriter) Flush() error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.isClosed {
|
||||
return fmt.Errorf("oplog writer is closed")
|
||||
}
|
||||
|
||||
return w.file.Sync()
|
||||
}
|
187
internal/storage/state.go
Normal file
187
internal/storage/state.go
Normal file
@ -0,0 +1,187 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.dws.rip/DWS/onyx/internal/models"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
)
|
||||
|
||||
// StateCapture provides functionality to capture repository state
// (refs, current workstream, and coarse working-tree/index fingerprints)
// and to restore or compare such snapshots.
type StateCapture struct {
	repo *git.Repository // the go-git repository being inspected
}
|
||||
|
||||
// NewStateCapture creates a new StateCapture instance
|
||||
func NewStateCapture(repo *git.Repository) *StateCapture {
|
||||
return &StateCapture{
|
||||
repo: repo,
|
||||
}
|
||||
}
|
||||
|
||||
// CaptureState captures the current state of the repository: every ref,
// the checked-out workstream (branch), and coarse fingerprints of the
// working tree and index. Only a refs-capture failure aborts the call;
// the optional parts are deliberately downgraded to empty strings
// (e.g. detached HEAD, freshly initialized repository).
func (s *StateCapture) CaptureState() (*models.RepositoryState, error) {
	refs, err := s.captureRefs()
	if err != nil {
		return nil, fmt.Errorf("failed to capture refs: %w", err)
	}

	currentWorkstream, err := s.getCurrentWorkstream()
	if err != nil {
		// It's okay if there's no current workstream (e.g., in detached HEAD state)
		currentWorkstream = ""
	}

	workingTreeHash, err := s.getWorkingTreeHash()
	if err != nil {
		// Working tree hash might not be available in a fresh repo
		workingTreeHash = ""
	}

	indexHash, err := s.getIndexHash()
	if err != nil {
		// Index hash might not be available in a fresh repo
		indexHash = ""
	}

	return models.NewRepositoryState(refs, currentWorkstream, workingTreeHash, indexHash), nil
}
|
||||
|
||||
// captureRefs captures all Git references (branches, tags, etc.)
|
||||
func (s *StateCapture) captureRefs() (map[string]string, error) {
|
||||
refs := make(map[string]string)
|
||||
|
||||
refIter, err := s.repo.References()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get references: %w", err)
|
||||
}
|
||||
|
||||
err = refIter.ForEach(func(ref *plumbing.Reference) error {
|
||||
if ref.Type() == plumbing.HashReference {
|
||||
refs[ref.Name().String()] = ref.Hash().String()
|
||||
} else if ref.Type() == plumbing.SymbolicReference {
|
||||
// For symbolic refs (like HEAD), store the target
|
||||
refs[ref.Name().String()] = ref.Target().String()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to iterate references: %w", err)
|
||||
}
|
||||
|
||||
return refs, nil
|
||||
}
|
||||
|
||||
// getCurrentWorkstream determines the current workstream (branch)
|
||||
func (s *StateCapture) getCurrentWorkstream() (string, error) {
|
||||
head, err := s.repo.Head()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get HEAD: %w", err)
|
||||
}
|
||||
|
||||
if head.Name().IsBranch() {
|
||||
return head.Name().Short(), nil
|
||||
}
|
||||
|
||||
// In detached HEAD state
|
||||
return "", fmt.Errorf("in detached HEAD state")
|
||||
}
|
||||
|
||||
// getWorkingTreeHash gets a hash representing the current working tree.
// If the tree is clean it returns the HEAD commit hash; any dirty tree
// yields the sentinel string "dirty". Note that a clean tree whose HEAD
// cannot be resolved also falls through to "dirty".
func (s *StateCapture) getWorkingTreeHash() (string, error) {
	worktree, err := s.repo.Worktree()
	if err != nil {
		return "", fmt.Errorf("failed to get worktree: %w", err)
	}

	status, err := worktree.Status()
	if err != nil {
		return "", fmt.Errorf("failed to get status: %w", err)
	}

	// For now, we'll just check if the working tree is clean
	// In the future, we might compute an actual hash
	if status.IsClean() {
		head, err := s.repo.Head()
		if err == nil {
			return head.Hash().String(), nil
		}
	}

	return "dirty", nil
}
|
||||
|
||||
// getIndexHash gets a hash representing the current index (staging area).
// Placeholder: it always returns an empty string and no error; a real
// digest of the index may be computed here in the future.
func (s *StateCapture) getIndexHash() (string, error) {
	// For now, this is a placeholder
	// In the future, we might compute a proper hash of the index
	return "", nil
}
|
||||
|
||||
// RestoreState restores the repository to a previously captured state by
// rewriting every hash ref recorded in the snapshot and then attempting
// to check out the captured workstream. Symbolic refs (such as HEAD) are
// skipped, and refs created after the snapshot are NOT deleted — only
// refs present in state.Refs are touched. A failed checkout is printed
// as a warning rather than returned, since the refs themselves have
// already been reinstated by that point.
func (s *StateCapture) RestoreState(state *models.RepositoryState) error {
	// Restore all refs
	for refName, refHash := range state.Refs {
		ref := plumbing.NewReferenceFromStrings(refName, refHash)

		// Skip symbolic references for now
		if ref.Type() == plumbing.SymbolicReference {
			continue
		}

		err := s.repo.Storer.SetReference(ref)
		if err != nil {
			return fmt.Errorf("failed to restore ref %s: %w", refName, err)
		}
	}

	// If there's a current workstream, check it out
	if state.CurrentWorkstream != "" {
		worktree, err := s.repo.Worktree()
		if err != nil {
			return fmt.Errorf("failed to get worktree: %w", err)
		}

		err = worktree.Checkout(&git.CheckoutOptions{
			Branch: plumbing.NewBranchReferenceName(state.CurrentWorkstream),
		})
		if err != nil {
			// Don't fail if checkout fails, just log it
			// The refs have been restored which is the most important part
			fmt.Printf("Warning: failed to checkout branch %s: %v\n", state.CurrentWorkstream, err)
		}
	}

	return nil
}
|
||||
|
||||
// CompareStates compares two repository states and returns the differences
|
||||
func (s *StateCapture) CompareStates(before, after *models.RepositoryState) map[string]string {
|
||||
differences := make(map[string]string)
|
||||
|
||||
// Check for changed/added refs
|
||||
for refName, afterHash := range after.Refs {
|
||||
beforeHash, exists := before.Refs[refName]
|
||||
if !exists {
|
||||
differences[refName] = fmt.Sprintf("added: %s", afterHash)
|
||||
} else if beforeHash != afterHash {
|
||||
differences[refName] = fmt.Sprintf("changed: %s -> %s", beforeHash, afterHash)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for deleted refs
|
||||
for refName := range before.Refs {
|
||||
if _, exists := after.Refs[refName]; !exists {
|
||||
differences[refName] = "deleted"
|
||||
}
|
||||
}
|
||||
|
||||
// Check workstream change
|
||||
if before.CurrentWorkstream != after.CurrentWorkstream {
|
||||
differences["current_workstream"] = fmt.Sprintf("changed: %s -> %s", before.CurrentWorkstream, after.CurrentWorkstream)
|
||||
}
|
||||
|
||||
return differences
|
||||
}
|
Reference in New Issue
Block a user