/* git/builtin/pack-objects.c */

#define USE_THE_REPOSITORY_VARIABLE
#include "builtin.h"
#include "environment.h"
#include "gettext.h"
#include "hex.h"
#include "config.h"
#include "attr.h"
#include "object.h"
#include "commit.h"
#include "tag.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "delta-islands.h"
#include "reachable.h"
#include "oid-array.h"
#include "strvec.h"
#include "list.h"
#include "packfile.h"
#include "object-file.h"
#include "object-store-ll.h"
#include "replace-object.h"
#include "dir.h"
#include "midx.h"
#include "trace2.h"
#include "shallow.h"
#include "promisor-remote.h"
#include "pack-mtimes.h"
#include "parse-options.h"

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static inline struct object_entry *oe_delta(
		const struct packing_data *pack,
		const struct object_entry *e)
{}

static inline unsigned long oe_delta_size(struct packing_data *pack,
					  const struct object_entry *e)
{}

unsigned long oe_get_size_slow(struct packing_data *pack,
			       const struct object_entry *e);

static inline unsigned long oe_size(struct packing_data *pack,
				    const struct object_entry *e)
{}

static inline void oe_set_delta(struct packing_data *pack,
				struct object_entry *e,
				struct object_entry *delta)
{}

static inline struct object_entry *oe_delta_sibling(
		const struct packing_data *pack,
		const struct object_entry *e)
{}

static inline struct object_entry *oe_delta_child(
		const struct packing_data *pack,
		const struct object_entry *e)
{}

static inline void oe_set_delta_child(struct packing_data *pack,
				      struct object_entry *e,
				      struct object_entry *delta)
{}

static inline void oe_set_delta_sibling(struct packing_data *pack,
					struct object_entry *e,
					struct object_entry *delta)
{}

static inline void oe_set_size(struct packing_data *pack,
			       struct object_entry *e,
			       unsigned long size)
{}

static inline void oe_set_delta_size(struct packing_data *pack,
				     struct object_entry *e,
				     unsigned long size)
{}

#define IN_PACK(obj)
#define SIZE(obj)
#define SET_SIZE(obj,size)
#define DELTA_SIZE(obj)
#define DELTA(obj)
#define DELTA_CHILD(obj)
#define DELTA_SIBLING(obj)
#define SET_DELTA(obj, val)
#define SET_DELTA_EXT(obj, oid)
#define SET_DELTA_SIZE(obj, val)
#define SET_DELTA_CHILD(obj, val)
#define SET_DELTA_SIBLING(obj, val)

static const char *pack_usage[] =;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written, nr_seen;
static struct bitmap_index *bitmap_git;
static uint32_t write_layer;

static int non_empty;
static int reuse_delta =, reuse_object =;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int cruft;
static timestamp_t cruft_expiration;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep_on_disk;
static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress =;
static int window =;
static unsigned long pack_size_limit;
static int depth =;
static int delta_search_threads;
static int pack_to_stdout;
static int sparse;
static int thin;
static int num_preferred_base;
static struct progress *progress_state;

static struct bitmapped_pack *reuse_packfiles;
static size_t reuse_packfiles_nr;
static size_t reuse_packfiles_used_nr;
static uint32_t reuse_packfile_objects;
static struct bitmap *reuse_packfile_bitmap;

static int use_bitmap_index_default =;
static int use_bitmap_index =;
static enum {} allow_pack_reuse =;
static enum {} write_bitmap_index;
static uint16_t write_bitmap_options =;

static int exclude_promisor_objects;

static int use_delta_islands;

static unsigned long delta_cache_size =;
static unsigned long max_delta_cache_size =;
static unsigned long cache_max_small_delta_size =;

static unsigned long window_memory_limit =;

static struct string_list uri_protocols =;

enum missing_action {};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;

struct configured_exclusion {};
static struct oidmap configured_exclusions;

static struct oidset excluded_by_config;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{}

static void *get_delta(struct object_entry *entry)
{}

static unsigned long do_compress(void **pptr, unsigned long size)
{}

static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
					   const struct object_id *oid)
{}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{}

static void copy_pack_data(struct hashfile *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{}

static inline int oe_size_greater_than(struct packing_data *pack,
				       const struct object_entry *lhs,
				       unsigned long rhs)
{}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct hashfile *f,
			  struct object_entry *entry,
			  off_t write_offset)
{}

enum write_one_status {};

static enum write_one_status write_one(struct hashfile *f,
				       struct object_entry *e,
				       off_t *offset)
{}

static int mark_tagged(const char *path UNUSED, const char *referent UNUSED, const struct object_id *oid,
		       int flag UNUSED, void *cb_data UNUSED)
{}

static inline unsigned char oe_layer(struct packing_data *pack,
				     struct object_entry *e)
{}

static inline void add_to_write_order(struct object_entry **wo,
			       unsigned int *endp,
			       struct object_entry *e)
{}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{}

static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
{}

static struct object_entry **compute_write_order(void)
{}


/*
 * A reused set of objects. All objects in a chunk have the same
 * relative position in the original packfile and the generated
 * packfile.
 */

static struct reused_chunk {} *reused_chunks;
static int reused_chunks_nr;
static int reused_chunks_alloc;

static void record_reused_object(off_t where, off_t offset)
{}

/*
 * Binary search to find the chunk that "where" is in. Note
 * that we're not looking for an exact match, just the first
 * chunk that contains it (which implicitly ends at the start
 * of the next chunk).
 */
static off_t find_reused_offset(off_t where)
{}

static void write_reused_pack_one(struct packed_git *reuse_packfile,
				  size_t pos, struct hashfile *out,
				  off_t pack_start,
				  struct pack_window **w_curs)
{}

static size_t write_reused_pack_verbatim(struct bitmapped_pack *reuse_packfile,
					 struct hashfile *out,
					 off_t pack_start,
					 struct pack_window **w_curs)
{}

static void write_reused_pack(struct bitmapped_pack *reuse_packfile,
			      struct hashfile *f)
{}

static void write_excluded_by_configs(void)
{}

static const char no_split_warning[] =
);

static void write_pack_file(void)
{}

static int no_try_delta(const char *path)
{}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const struct object_id *oid,
				int exclude)
{}

static int want_found_object(const struct object_id *oid, int exclude,
			     struct packed_git *p)
{}

static int want_object_in_pack_one(struct packed_git *p,
				   const struct object_id *oid,
				   int exclude,
				   struct packed_git **found_pack,
				   off_t *found_offset)
{}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const struct object_id *oid,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{}

static struct object_entry *create_object_entry(const struct object_id *oid,
						enum object_type type,
						uint32_t hash,
						int exclude,
						int no_try_delta,
						struct packed_git *found_pack,
						off_t found_offset)
{}

static const char no_closure_warning[] =
);

static int add_object_entry(const struct object_id *oid, enum object_type type,
			    const char *name, int exclude)
{}

static int add_object_entry_from_bitmap(const struct object_id *oid,
					enum object_type type,
					int flags UNUSED, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{}

struct pbase_tree_cache {};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const struct object_id *oid)
{}
static int pbase_tree_cache_ix_incr(int ix)
{}

static struct pbase_tree {} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{}

static size_t name_cmp_len(const char *name)
{}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     size_t cmplen,
			     const char *fullname)
{}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
static int done_pbase_path_pos(unsigned hash)
{}

static int check_pbase_path(unsigned hash)
{}

static void add_preferred_base_object(const char *name)
{}

static void add_preferred_base(struct object_id *oid)
{}

static void cleanup_preferred_base(void)
{}

/*
 * Return 1 iff the object specified by "delta" can be sent
 * literally as a delta against the base in "base_sha1". If
 * so, then *base_out will point to the entry in our packing
 * list, or NULL if we must use the external-base list.
 *
 * Depth value does not matter - find_deltas() will
 * never consider reused delta as the base object to
 * deltify other objects against, in order to avoid
 * circular deltas.
 */
static int can_reuse_delta(const struct object_id *base_oid,
			   struct object_entry *delta,
			   struct object_entry **base_out)
{}

static void prefetch_to_pack(uint32_t object_index_start) {}

static void check_object(struct object_entry *entry, uint32_t object_index)
{}

static int pack_offset_sort(const void *_a, const void *_b)
{}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{}

/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{}

static void get_object_details(void)
{}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas are therefore the oldest objects which are
 * less susceptible to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{}

struct unpacked {};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{}

/* Protect delta_cache_size */
static pthread_mutex_t cache_mutex;
#define cache_lock()
#define cache_unlock()

/*
 * Protect object list partitioning (e.g. struct thread_param) and
 * progress_state
 */
static pthread_mutex_t progress_mutex;
#define progress_lock()
#define progress_unlock()

/*
 * Access to struct object_entry is unprotected since each thread owns
 * a portion of the main object list. Just don't access object entries
 * ahead in the list because they can be stolen and would need
 * progress_mutex for protection.
 */

static inline int oe_size_less_than(struct packing_data *pack,
				    const struct object_entry *lhs,
				    unsigned long rhs)
{}

static inline void oe_set_tree_depth(struct packing_data *pack,
				     struct object_entry *e,
				     unsigned int tree_depth)
{}

/*
 * Return the size of the object without doing any delta
 * reconstruction (so non-deltas are true object sizes, but deltas
 * return the size of the delta data).
 */
unsigned long oe_get_size_slow(struct packing_data *pack,
			       const struct object_entry *e)
{}

static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{}

static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{}

static unsigned long free_unpacked(struct unpacked *n)
{}

static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{}

/*
 * The main object list is split into smaller lists, each is handed to
 * one worker.
 *
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 *
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 *
 * The main thread steals half of the work from the worker that has
 * most work left to hand it to the idle worker.
 */

struct thread_params {};

static pthread_cond_t progress_cond;

/*
 * Mutex and conditional variable can't be statically-initialized on Windows.
 */
static void init_threaded_search(void)
{}

static void cleanup_threaded_search(void)
{}

static void *threaded_find_deltas(void *arg)
{}

static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{}

static int obj_is_packed(const struct object_id *oid)
{}

static void add_tag_chain(const struct object_id *oid)
{}

static int add_ref_tag(const char *tag UNUSED, const char *referent UNUSED, const struct object_id *oid,
		       int flag UNUSED, void *cb_data UNUSED)
{}

static void prepare_pack(int window, int depth)
{}

static int git_pack_config(const char *k, const char *v,
			   const struct config_context *ctx, void *cb)
{}

/* Counters for trace2 output when in --stdin-packs mode. */
static int stdin_packs_found_nr;
static int stdin_packs_hints_nr;

static int add_object_entry_from_pack(const struct object_id *oid,
				      struct packed_git *p,
				      uint32_t pos,
				      void *_data)
{}

static void show_commit_pack_hint(struct commit *commit UNUSED,
				  void *data UNUSED)
{}

static void show_object_pack_hint(struct object *object, const char *name,
				  void *data UNUSED)
{}

static int pack_mtime_cmp(const void *_a, const void *_b)
{}

static void read_packs_list_from_stdin(void)
{}

static void add_cruft_object_entry(const struct object_id *oid, enum object_type type,
				   struct packed_git *pack, off_t offset,
				   const char *name, uint32_t mtime)
{}

static void show_cruft_object(struct object *obj, const char *name, void *data UNUSED)
{}

static void show_cruft_commit(struct commit *commit, void *data)
{}

static int cruft_include_check_obj(struct object *obj, void *data UNUSED)
{}

static int cruft_include_check(struct commit *commit, void *data)
{}

static void set_cruft_mtime(const struct object *object,
			    struct packed_git *pack,
			    off_t offset, time_t mtime)
{}

static void mark_pack_kept_in_core(struct string_list *packs, unsigned keep)
{}

static void add_unreachable_loose_objects(void);
static void add_objects_in_unpacked_packs(void);

static void enumerate_cruft_objects(void)
{}

static void enumerate_and_traverse_cruft_objects(struct string_list *fresh_packs)
{}

static void read_cruft_objects(void)
{}

static void read_object_list_from_stdin(void)
{}

static void show_commit(struct commit *commit, void *data UNUSED)
{}

static void show_object(struct object *obj, const char *name,
			void *data UNUSED)
{}

static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
{}

static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
{}

static int option_parse_missing_action(const struct option *opt UNUSED,
				       const char *arg, int unset)
{}

static void show_edge(struct commit *commit)
{}

static int add_object_in_unpacked_pack(const struct object_id *oid,
				       struct packed_git *pack,
				       uint32_t pos,
				       void *data UNUSED)
{}

static void add_objects_in_unpacked_packs(void)
{}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data UNUSED)
{}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{}

static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{}

/*
 * Store a list of sha1s that should not be discarded
 * because they are either written too recently, or are
 * reachable from another object that was.
 *
 * This is filled by get_object_list.
 */
static struct oid_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    timestamp_t mtime)
{}

static void loosen_unused_packed_objects(void)
{}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{}

static int get_object_list_from_bitmap(struct rev_info *revs)
{}

static void record_recent_object(struct object *obj,
				 const char *name UNUSED,
				 void *data UNUSED)
{}

static void record_recent_commit(struct commit *commit, void *data UNUSED)
{}

static int mark_bitmap_preferred_tip(const char *refname,
				     const char *referent UNUSED,
				     const struct object_id *oid,
				     int flags UNUSED,
				     void *data UNUSED)
{}

static void mark_bitmap_preferred_tips(void)
{}

static void get_object_list(struct rev_info *revs, int ac, const char **av)
{}

static void add_extra_kept_packs(const struct string_list *names)
{}

static int option_parse_quiet(const struct option *opt, const char *arg,
			      int unset)
{}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{}

static int option_parse_unpack_unreachable(const struct option *opt UNUSED,
					   const char *arg, int unset)
{}

static int option_parse_cruft_expiration(const struct option *opt UNUSED,
					 const char *arg, int unset)
{}

int cmd_pack_objects(int argc,
		     const char **argv,
		     const char *prefix,
		     struct repository *repo UNUSED)
{}