/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"



/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
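
/*
 * A fenced buffer keeps its contents either in GPU memory (buffer != NULL),
 * in temporary CPU memory (data != NULL), or nowhere at all before any
 * storage has been created.  Validation moves the contents into GPU
 * storage; GPU memory pressure may move them back out into CPU storage.
 */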


static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);


/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has been already carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
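
/*
 * Illustrative caller pattern (a sketch; compare fenced_buffer_map()
 * below): because fenced_buffer_finish_locked() may drop and re-acquire
 * the mutex, any mutable state read before the call must be re-read
 * afterwards, hence the loop:
 *
 *    pipe_mutex_lock(fenced_mgr->mutex);
 *    while(fenced_buf->flags & PB_USAGE_GPU_WRITE)
 *       fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
 *    pipe_mutex_unlock(fenced_mgr->mutex);
 */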


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences have already expired, without
             * further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}


/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}


/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if(fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}


static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
          (flags & PB_USAGE_CPU_WRITE))) {

      /* 
       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
       */
      if((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}


static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}


static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
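
/*
 * Illustrative lifecycle (a sketch of the expected winsys usage): buffers
 * are validated while a command batch is being built, and fenced right
 * after the batch is submitted:
 *
 *    pb_validate(buf, vl, PB_USAGE_GPU_WRITE);   reaches fenced_buffer_validate()
 *    ...emit commands referencing buf...
 *    pb_fence(buf, fence);                       reaches fenced_buffer_fence()
 */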


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};
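
/*
 * The generic pb_buffer entry points (pb_map(), pb_unmap(), pb_validate(),
 * pb_fence() and pb_get_base_buffer()) dispatch through this vtbl, so all
 * buffers created by this manager funnel into the functions above.
 */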


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}


static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}


struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
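

/*
 * Usage sketch (illustrative only; "winsys_mgr" and "winsys_fence_ops" are
 * hypothetical placeholders for a driver's own provider and fence ops):
 *
 *    struct pb_manager *mgr;
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    mgr = fenced_bufmgr_create(winsys_mgr, winsys_fence_ops,
 *                               16 * 1024 * 1024,   (max_buffer_size)
 *                               64 * 1024 * 1024);  (max_cpu_total_size)
 *
 *    memset(&desc, 0, sizeof desc);
 *    desc.alignment = 64;
 *    desc.usage = PB_USAGE_GPU_READ_WRITE;
 *
 *    buf = mgr->create_buffer(mgr, 4096, &desc);
 *    ...
 *    pb_reference(&buf, NULL);
 *    mgr->destroy(mgr);
 */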