path: root/src/tag.c
author		Max Kellermann <max@duempel.org>	2008-08-29 09:39:08 +0200
committer	Max Kellermann <max@duempel.org>	2008-08-29 09:39:08 +0200
commit		1aa3457346b6c88d4d43e6faf5cde2ae95f36275 (patch)
tree		c34d6f76e5cb724711737432179342198c5b4f91 /src/tag.c
parent		031522060aa64fea48e30480bee9b5daa3737089 (diff)
download	mpd-1aa3457346b6c88d4d43e6faf5cde2ae95f36275.tar.gz
		mpd-1aa3457346b6c88d4d43e6faf5cde2ae95f36275.tar.xz
		mpd-1aa3457346b6c88d4d43e6faf5cde2ae95f36275.zip
tag: try not to reallocate tag.items in every add() call
If many tag_items are added at once while the tag cache is being loaded,
manage these items in a static fixed list, instead of reallocating the list
with every newly created item. This reduces heap fragmentation.

Massif results again:

mk before: total 12,837,632; useful 10,626,383; extra 2,211,249
mk now:    total 12,736,720; useful 10,626,383; extra 2,110,337

The "useful" value is the same since this patch only changes the way we
allocate the same amount of memory, but heap fragmentation was reduced by 5%.
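The two new functions are meant to bracket a burst of item additions while the
database is being read. Below is a minimal sketch of the intended call pattern;
only tag_begin_add() and tag_end_add() come from this patch, while the loader
loop and the tag_new(), read_next_tag_value(), tag_add_item() and
TAG_ITEM_TITLE names are assumptions used purely for illustration:

/* Hypothetical caller illustrating the bulk API; everything except
 * tag_begin_add() and tag_end_add() is assumed, not taken from tag.c. */
static struct tag *load_one_song_tag(void)
{
	struct tag *tag = tag_new();		/* assumed constructor */
	const char *value;

	/* redirect tag->items to the static bulk array, so the add
	   calls below do not reallocate for every single item */
	tag_begin_add(tag);

	while ((value = read_next_tag_value()) != NULL)	/* hypothetical */
		tag_add_item(tag, TAG_ITEM_TITLE, value);	/* assumed adder */

	/* copy the collected items into one exactly-sized heap block;
	   if BULK_MAX was exceeded, the add path has already switched
	   back to normal allocation and this call changes nothing */
	tag_end_add(tag);

	return tag;
}

Built this way, a tag assembled in one burst costs a single allocation of the
exact final size instead of one xrealloc() per item, which is where the
reduction in Massif's "extra" bytes comes from.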
Diffstat (limited to 'src/tag.c')
-rw-r--r--	src/tag.c	62
1 file changed, 60 insertions(+), 2 deletions(-)
diff --git a/src/tag.c b/src/tag.c
index a27417554..f9dc767f2 100644
--- a/src/tag.c
+++ b/src/tag.c
@@ -361,6 +361,53 @@ static inline const char *fix_utf8(const char *str, size_t *length_r) {
 	return temp;
 }
 
+/**
+ * Maximum number of items managed in the bulk list; if it is
+ * exceeded, we switch back to "normal" reallocation.
+ */
+#define BULK_MAX 64
+
+static struct {
+#ifndef NDEBUG
+	int busy;
+#endif
+	struct tag_item *items[BULK_MAX];
+} bulk;
+
+void tag_begin_add(struct tag *tag)
+{
+	assert(!bulk.busy);
+	assert(tag != NULL);
+	assert(tag->items == NULL);
+	assert(tag->numOfItems == 0);
+
+#ifndef NDEBUG
+	bulk.busy = 1;
+#endif
+	tag->items = bulk.items;
+}
+
+void tag_end_add(struct tag *tag)
+{
+	if (tag->items == bulk.items) {
+		assert(tag->numOfItems <= BULK_MAX);
+
+		if (tag->numOfItems > 0) {
+			/* copy the tag items from the bulk list over
+			   to a new list (which fits exactly) */
+			tag->items = xmalloc(tag->numOfItems *
+					     sizeof(tag->items[0]));
+			memcpy(tag->items, bulk.items,
+			       tag->numOfItems * sizeof(tag->items[0]));
+		} else
+			tag->items = NULL;
+	}
+
+#ifndef NDEBUG
+	bulk.busy = 0;
+#endif
+}
+
 static void appendToTagItems(struct tag *tag, enum tag_type type,
 			     const char *value, size_t len)
 {
@@ -380,8 +427,19 @@ static void appendToTagItems(struct tag *tag, enum tag_type type,
 	}
 	tag->numOfItems++;
-	tag->items = xrealloc(tag->items,
-			      tag->numOfItems * sizeof(*tag->items));
+
+	if (tag->items != bulk.items)
+		/* bulk mode disabled */
+		tag->items = xrealloc(tag->items,
+				      tag->numOfItems * sizeof(*tag->items));
+	else if (tag->numOfItems >= BULK_MAX) {
+		/* bulk list already full - switch back to non-bulk */
+		assert(bulk.busy);
+
+		tag->items = xmalloc(tag->numOfItems * sizeof(tag->items[0]));
+		memcpy(tag->items, bulk.items,
+		       (tag->numOfItems - 1) * sizeof(tag->items[0]));
+	}
 	tag->items[i] = tag_pool_get_item(type, p, len);