/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"

#include "util/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"

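/* Blitter save-state flags used by the clear entry points below. */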
enum {
	SI_CLEAR         = SI_SAVE_FRAGMENT_STATE,
	SI_CLEAR_SURFACE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE,
};

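/* Allocate a separate CMASK buffer for the texture if it doesn't have one
 * yet, so that CMASK fast clears become possible. No-op if the buffer
 * already exists or the surface has no CMASK.
 */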
static void si_alloc_separate_cmask(struct si_screen *sscreen,
				    struct si_texture *tex)
{
	if (tex->cmask_buffer || !tex->surface.cmask_size)
		return;

	tex->cmask_buffer =
		si_aligned_buffer_create(&sscreen->b,
					 SI_RESOURCE_FLAG_UNMAPPABLE,
					 PIPE_USAGE_DEFAULT,
					 tex->surface.cmask_size,
					 tex->surface.cmask_alignment);
	if (tex->cmask_buffer == NULL)
		return;

	tex->cmask_base_address_reg = tex->cmask_buffer->gpu_address >> 8;
	tex->cb_color_info |= S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&sscreen->compressed_colortex_counter);
}

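/* Pack the clear color for the given surface format into
 * tex->color_clear_value (the value programmed into the CB clear color
 * registers). Returns true if the stored value changed.
 */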
static bool si_set_clear_color(struct si_texture *tex,
			       enum pipe_format surface_format,
			       const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (tex->surface.bpe == 16) {
		/* DCC fast clear only:
		 *   CLEAR_WORD0 = R = G = B
		 *   CLEAR_WORD1 = A
		 */
		assert(color->ui[0] == color->ui[1] &&
		       color->ui[0] == color->ui[2]);
		uc.ui[0] = color->ui[0];
		uc.ui[1] = color->ui[3];
	} else if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	if (memcmp(tex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
		return false;

	memcpy(tex->color_clear_value, &uc, 2 * sizeof(uint32_t));
	return true;
}

/** Linearize and convert luminance/intensity to red. */
enum pipe_format si_simplify_cb_format(enum pipe_format format)
{
	format = util_format_linear(format);
	format = util_format_luminance_to_red(format);
	return util_format_intensity_to_red(format);
}

bool vi_alpha_is_on_msb(enum pipe_format format)
{
	format = si_simplify_cb_format(format);

	/* Formats with 3 channels can't have alpha. */
	if (util_format_description(format)->nr_channels == 3)
		return true; /* same as xxxA; is any value OK here? */

	return si_translate_colorswap(format, false) <= 1;
}

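/* Compute the DCC clear code for clearing to the given color.
 * Returns false if a DCC fast clear isn't possible at all. On success,
 * *clear_value receives the clear code and *eliminate_needed says whether
 * an ELIMINATE_FAST_CLEAR pass is still required afterwards.
 */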
static bool vi_get_fast_clear_parameters(enum pipe_format base_format,
					 enum pipe_format surface_format,
					 const union pipe_color_union *color,
					 uint32_t* clear_value,
					 bool *eliminate_needed)
{
	/* If we want to clear without needing a fast clear eliminate step, we
	 * can set color and alpha independently to 0 or 1 (or 0/max for integer
	 * formats).
	 */
	bool values[4] = {}; /* whether to clear to 0 or 1 */
	bool color_value = false; /* clear color to 0 or 1 */
	bool alpha_value = false; /* clear alpha to 0 or 1 */
	int alpha_channel; /* index of the alpha component */
	bool has_color = false;
	bool has_alpha = false;

	const struct util_format_description *desc =
		util_format_description(si_simplify_cb_format(surface_format));

	/* 128-bit fast clear with different R,G,B values is unsupported. */
	if (desc->block.bits == 128 &&
	    (color->ui[0] != color->ui[1] ||
	     color->ui[0] != color->ui[2]))
		return false;

	*eliminate_needed = true;
	*clear_value = 0x20202020U; /* use CB clear color registers */

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return true; /* need ELIMINATE_FAST_CLEAR */

	bool base_alpha_is_on_msb = vi_alpha_is_on_msb(base_format);
	bool surf_alpha_is_on_msb = vi_alpha_is_on_msb(surface_format);

	/* Formats with 3 channels can't have alpha. */
	if (desc->nr_channels == 3)
		alpha_channel = -1;
	else if (surf_alpha_is_on_msb)
		alpha_channel = desc->nr_channels - 1;
	else
		alpha_channel = 0;

	for (int i = 0; i < 4; ++i) {
		if (desc->swizzle[i] >= PIPE_SWIZZLE_0)
			continue;

		if (desc->channel[i].pure_integer &&
		    desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			/* Use the maximum value for clamping the clear color. */
			int max = u_bit_consecutive(0, desc->channel[i].size - 1);

			values[i] = color->i[i] != 0;
			if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
				return true; /* need ELIMINATE_FAST_CLEAR */
		} else if (desc->channel[i].pure_integer &&
			   desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
			/* Use the maximum value for clamping the clear color. */
			unsigned max = u_bit_consecutive(0, desc->channel[i].size);

			values[i] = color->ui[i] != 0U;
			if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)
				return true; /* need ELIMINATE_FAST_CLEAR */
		} else {
			values[i] = color->f[i] != 0.0F;
			if (color->f[i] != 0.0F && color->f[i] != 1.0F)
				return true; /* need ELIMINATE_FAST_CLEAR */
		}

		if (desc->swizzle[i] == alpha_channel) {
			alpha_value = values[i];
			has_alpha = true;
		} else {
			color_value = values[i];
			has_color = true;
		}
	}

	/* If alpha isn't present, make it the same as color, and vice versa. */
	if (!has_alpha)
		alpha_value = color_value;
	else if (!has_color)
		color_value = alpha_value;

	if (color_value != alpha_value &&
	    base_alpha_is_on_msb != surf_alpha_is_on_msb)
		return true; /* require ELIMINATE_FAST_CLEAR */

	/* Check if all color values are equal if they are present. */
	for (int i = 0; i < 4; ++i) {
		if (desc->swizzle[i] <= PIPE_SWIZZLE_W &&
		    desc->swizzle[i] != alpha_channel &&
		    values[i] != color_value)
			return true; /* require ELIMINATE_FAST_CLEAR */
	}

	/* This doesn't need ELIMINATE_FAST_CLEAR.
	 * CB uses both the DCC clear codes and the CB clear color registers,
	 * so they must match.
	 */
	*eliminate_needed = false;

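	/* Encode whether color and alpha clear to 1 (rather than 0) on top of
	 * the base clear code set above. */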
	if (color_value)
		*clear_value |= 0x80808080U;
	if (alpha_value)
		*clear_value |= 0x40404040U;
	return true;
}

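/* Clear the DCC metadata of one mip level to the given clear value. */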
void vi_dcc_clear_level(struct si_context *sctx,
			struct si_texture *tex,
			unsigned level, unsigned clear_value)
{
	struct pipe_resource *dcc_buffer;
	uint64_t dcc_offset, clear_size;

	assert(vi_dcc_enabled(tex, level));

	if (tex->dcc_separate_buffer) {
		dcc_buffer = &tex->dcc_separate_buffer->b.b;
		dcc_offset = 0;
	} else {
		dcc_buffer = &tex->buffer.b.b;
		dcc_offset = tex->dcc_offset;
	}

	if (sctx->chip_class >= GFX9) {
		/* Mipmap level clears aren't implemented. */
		assert(tex->buffer.b.b.last_level == 0);
		/* 4x and 8x MSAA need a sophisticated compute shader for
		 * the clear. See AMDVLK. */
		assert(tex->buffer.b.b.nr_storage_samples <= 2);
		clear_size = tex->surface.dcc_size;
	} else {
		unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

		/* If this is 0, fast clear isn't possible. (can occur with MSAA) */
		assert(tex->surface.u.legacy.level[level].dcc_fast_clear_size);
		/* Layered 4x and 8x MSAA DCC fast clears need to clear
		 * dcc_fast_clear_size bytes for each layer. A compute shader
		 * would be more efficient than separate per-layer clear operations.
		 */
		assert(tex->buffer.b.b.nr_storage_samples <= 2 || num_layers == 1);

		dcc_offset += tex->surface.u.legacy.level[level].dcc_offset;
		clear_size = tex->surface.u.legacy.level[level].dcc_fast_clear_size *
			     num_layers;
	}

	si_clear_buffer(sctx, dcc_buffer, dcc_offset, clear_size,
			&clear_value, 4, SI_COHERENCY_CB_META);
}

/* Set the same micro tile mode as the destination of the last MSAA resolve.
 * This allows hitting the MSAA resolve fast path, which requires that both
 * src and dst micro tile modes match.
 */
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen,
					   struct si_texture *tex)
{
	if (tex->buffer.b.is_shared ||
	    tex->buffer.b.b.nr_samples <= 1 ||
	    tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
		return;

	assert(sscreen->info.chip_class >= GFX9 ||
	       tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
	assert(tex->buffer.b.b.last_level == 0);

	if (sscreen->info.chip_class >= GFX9) {
		/* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
		assert(tex->surface.u.gfx9.surf.swizzle_mode >= 4);

		/* If you do swizzle_mode % 4, you'll get:
		 *   0 = Depth
		 *   1 = Standard
		 *   2 = Displayable
		 *   3 = Rotated
		 *
		 * Depth-sample order isn't allowed:
		 */
		assert(tex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);

		switch (tex->last_msaa_resolve_target_micro_mode) {
		case RADEON_MICRO_MODE_DISPLAY:
			tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
			tex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
			break;
		case RADEON_MICRO_MODE_THIN:
			tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
			tex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
			break;
		case RADEON_MICRO_MODE_ROTATED:
			tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
			tex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
			break;
		default: /* depth */
			assert(!"unexpected micro mode");
			return;
		}
	} else if (sscreen->info.chip_class >= CIK) {
		/* These magic numbers were copied from addrlib. It doesn't use
		 * any definitions for them either. They are all 2D_TILED_THIN1
		 * modes with different bpp and micro tile mode.
		 */
		switch (tex->last_msaa_resolve_target_micro_mode) {
		case RADEON_MICRO_MODE_DISPLAY:
			tex->surface.u.legacy.tiling_index[0] = 10;
			break;
		case RADEON_MICRO_MODE_THIN:
			tex->surface.u.legacy.tiling_index[0] = 14;
			break;
		case RADEON_MICRO_MODE_ROTATED:
			tex->surface.u.legacy.tiling_index[0] = 28;
			break;
		default: /* depth, thick */
			assert(!"unexpected micro mode");
			return;
		}
	} else { /* SI */
		switch (tex->last_msaa_resolve_target_micro_mode) {
		case RADEON_MICRO_MODE_DISPLAY:
			switch (tex->surface.bpe) {
			case 1:
				tex->surface.u.legacy.tiling_index[0] = 10;
				break;
			case 2:
				tex->surface.u.legacy.tiling_index[0] = 11;
				break;
			default: /* 4, 8 */
				tex->surface.u.legacy.tiling_index[0] = 12;
				break;
			}
			break;
		case RADEON_MICRO_MODE_THIN:
			switch (tex->surface.bpe) {
			case 1:
				tex->surface.u.legacy.tiling_index[0] = 14;
				break;
			case 2:
				tex->surface.u.legacy.tiling_index[0] = 15;
				break;
			case 4:
				tex->surface.u.legacy.tiling_index[0] = 16;
				break;
			default: /* 8, 16 */
				tex->surface.u.legacy.tiling_index[0] = 17;
				break;
			}
			break;
		default: /* depth, thick */
			assert(!"unexpected micro mode");
			return;
		}
	}

	tex->surface.micro_tile_mode = tex->last_msaa_resolve_target_micro_mode;

	p_atomic_inc(&sscreen->dirty_tex_counter);
}

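/* Fast-clear as many of the bound colorbuffers as possible using DCC or
 * CMASK metadata clears. Each successfully cleared buffer is removed from
 * *buffers; the remaining bits must be cleared by the caller.
 */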
static void si_do_fast_color_clear(struct si_context *sctx,
				   unsigned *buffers,
				   const union pipe_color_union *color)
{
	struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
	int i;

	/* This function is broken on big-endian, so just disable this path for now */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (sctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct si_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		unsigned level = fb->cbufs[i]->u.tex.level;
		if (level > 0)
			continue;

		tex = (struct si_texture *)fb->cbufs[i]->texture;

		/* TODO: GFX9: Implement DCC fast clear for level 0 of
		 * mipmapped textures. Mipmapped DCC has to clear a rectangular
		 * area of DCC for level 0 (because the whole miptree is
		 * organized in a 2D plane).
		 */
		if (sctx->chip_class >= GFX9 &&
		    tex->buffer.b.b.last_level > 0)
			continue;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->buffer.b.is_shared &&
		    !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		if (sctx->chip_class <= VI &&
		    tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
		    !sctx->screen->info.htile_cmask_support_1d_tiling)
			continue;

		/* Use a slow clear for small surfaces where the cost of
		 * the eliminate pass can be higher than the benefit of fast
		 * clear. The closed driver does this, but the numbers may differ.
		 *
		 * This helps on both dGPUs and APUs, even small APUs like Mullins.
		 */
		bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
				 tex->buffer.b.b.width0 *
				 tex->buffer.b.b.height0 <= 512 * 512;
		bool eliminate_needed = false;
		bool fmask_decompress_needed = false;

		/* Fast clear is the most appropriate place to enable DCC for
		 * displayable surfaces.
		 */
		if (sctx->family == CHIP_STONEY && !too_small) {
			vi_separate_dcc_try_enable(sctx, tex);

			/* RB+ doesn't support a CMASK-only fast clear on Stoney,
			 * so all clears are considered to be hypothetically slow
			 * clears, which is weighed when determining whether to
			 * enable separate DCC.
			 */
			if (tex->dcc_gather_statistics) /* only for Stoney */
				tex->num_slow_clears++;
		}

		/* Try to clear DCC first, otherwise try CMASK. */
		if (vi_dcc_enabled(tex, 0)) {
			uint32_t reset_value;

			if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
				continue;

			/* This can happen with mipmapping or MSAA. */
			if (sctx->chip_class == VI &&
			    !tex->surface.u.legacy.level[level].dcc_fast_clear_size)
				continue;

			if (!vi_get_fast_clear_parameters(tex->buffer.b.b.format,
							  fb->cbufs[i]->format,
							  color, &reset_value,
							  &eliminate_needed))
				continue;

			if (eliminate_needed && too_small)
				continue;

			/* DCC fast clear with MSAA should clear CMASK to 0xC. */
			if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
				/* TODO: This doesn't work with MSAA. */
				if (eliminate_needed)
					continue;

				uint32_t clear_value = 0xCCCCCCCC;
				si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
						tex->cmask_offset, tex->surface.cmask_size,
						&clear_value, 4, SI_COHERENCY_CB_META);
				fmask_decompress_needed = true;
			}

			vi_dcc_clear_level(sctx, tex, 0, reset_value);
			tex->separate_dcc_dirty = true;
		} else {
			if (too_small)
				continue;

			/* 128-bit formats are unsupported. */
			if (tex->surface.bpe > 8) {
				continue;
			}

			/* RB+ doesn't work with CMASK fast clear on Stoney. */
			if (sctx->family == CHIP_STONEY)
				continue;

			/* ensure CMASK is enabled */
			si_alloc_separate_cmask(sctx->screen, tex);
			if (!tex->cmask_buffer)
				continue;

			/* Do the fast clear. */
			uint32_t clear_value = 0;
			si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
					tex->cmask_offset, tex->surface.cmask_size,
					&clear_value, 4, SI_COHERENCY_CB_META);
			eliminate_needed = true;
		}

		if ((eliminate_needed || fmask_decompress_needed) &&
		    !(tex->dirty_level_mask & (1 << level))) {
			tex->dirty_level_mask |= 1 << level;
			p_atomic_inc(&sctx->screen->compressed_colortex_counter);
		}

		/* We can change the micro tile mode before a full clear. */
		si_set_optimal_micro_tile_mode(sctx->screen, tex);

		*buffers &= ~clear_bit;

		if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
			sctx->framebuffer.dirty_cbufs |= 1 << i;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
		}
	}
}

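/* The pipe_context::clear hook. Fast color clears are tried first, fast
 * depth/stencil clears are set up where HTILE allows it, and the blitter
 * handles whatever remains.
 */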
static void si_clear(struct pipe_context *ctx, unsigned buffers,
		     const union pipe_color_union *color,
		     double depth, unsigned stencil)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
	struct pipe_surface *zsbuf = fb->zsbuf;
	struct si_texture *zstex =
		zsbuf ? (struct si_texture*)zsbuf->texture : NULL;

	if (buffers & PIPE_CLEAR_COLOR) {
		si_do_fast_color_clear(sctx, &buffers, color);
		if (!buffers)
			return; /* all buffers have been fast cleared */

		/* These buffers cannot use fast clear; make sure to disable expansion. */
		for (unsigned i = 0; i < fb->nr_cbufs; i++) {
			struct si_texture *tex;

			/* If not clearing this buffer, skip. */
			if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
				continue;

			tex = (struct si_texture *)fb->cbufs[i]->texture;
			if (tex->surface.fmask_size == 0)
				tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
		}
	}

	if (zstex &&
	    si_htile_enabled(zstex, zsbuf->u.tex.level) &&
	    zsbuf->u.tex.first_layer == 0 &&
	    zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
		/* TC-compatible HTILE only supports depth clears to 0 or 1. */
		if (buffers & PIPE_CLEAR_DEPTH &&
		    (!zstex->tc_compatible_htile ||
		     depth == 0 || depth == 1)) {
			/* Need to disable EXPCLEAR temporarily if clearing
			 * to a new value. */
			if (!zstex->depth_cleared || zstex->depth_clear_value != depth) {
				sctx->db_depth_disable_expclear = true;
			}

			if (zstex->depth_clear_value != (float)depth) {
				/* Update DB_DEPTH_CLEAR. */
				zstex->depth_clear_value = depth;
				sctx->framebuffer.dirty_zsbuf = true;
				si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
			}
			sctx->db_depth_clear = true;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
		}

		/* TC-compatible HTILE only supports stencil clears to 0. */
		if (buffers & PIPE_CLEAR_STENCIL &&
		    (!zstex->tc_compatible_htile || stencil == 0)) {
			stencil &= 0xff;

			/* Need to disable EXPCLEAR temporarily if clearing
			 * to a new value. */
			if (!zstex->stencil_cleared || zstex->stencil_clear_value != stencil) {
				sctx->db_stencil_disable_expclear = true;
			}

			if (zstex->stencil_clear_value != (uint8_t)stencil) {
				/* Update DB_STENCIL_CLEAR. */
				zstex->stencil_clear_value = stencil;
				sctx->framebuffer.dirty_zsbuf = true;
				si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
			}
			sctx->db_stencil_clear = true;
			si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
		}

		/* TODO: Find out what's wrong here. Fast depth clear leads to
		 * corruption in ARK: Survival Evolved, but that may just be
		 * a coincidence and the root cause is elsewhere.
		 *
		 * The corruption can be fixed by putting the DB flush before
		 * or after the depth clear. (surprisingly)
		 *
		 * https://bugs.freedesktop.org/show_bug.cgi?id=102955 (apitrace)
		 *
		 * This hack decreases back-to-back ClearDepth performance.
		 */
		if ((sctx->db_depth_clear || sctx->db_stencil_clear) &&
		    sctx->screen->clear_db_cache_before_clear)
			sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB;
	}

	si_blitter_begin(sctx, SI_CLEAR);
	util_blitter_clear(sctx->blitter, fb->width, fb->height,
			   util_framebuffer_get_num_layers(fb),
			   buffers, color, depth, stencil);
	si_blitter_end(sctx);

	if (sctx->db_depth_clear) {
		sctx->db_depth_clear = false;
		sctx->db_depth_disable_expclear = false;
		zstex->depth_cleared = true;
		si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
	}

	if (sctx->db_stencil_clear) {
		sctx->db_stencil_clear = false;
		sctx->db_stencil_disable_expclear = false;
		zstex->stencil_cleared = true;
		si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
	}
}

static void si_clear_render_target(struct pipe_context *ctx,
				   struct pipe_surface *dst,
				   const union pipe_color_union *color,
				   unsigned dstx, unsigned dsty,
				   unsigned width, unsigned height,
				   bool render_condition_enabled)
{
	struct si_context *sctx = (struct si_context *)ctx;

	si_blitter_begin(sctx, SI_CLEAR_SURFACE |
			 (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
	util_blitter_clear_render_target(sctx->blitter, dst, color,
					 dstx, dsty, width, height);
	si_blitter_end(sctx);
}

static void si_clear_depth_stencil(struct pipe_context *ctx,
				   struct pipe_surface *dst,
				   unsigned clear_flags,
				   double depth,
				   unsigned stencil,
				   unsigned dstx, unsigned dsty,
				   unsigned width, unsigned height,
				   bool render_condition_enabled)
{
	struct si_context *sctx = (struct si_context *)ctx;

	si_blitter_begin(sctx, SI_CLEAR_SURFACE |
			 (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
	util_blitter_clear_depth_stencil(sctx->blitter, dst, clear_flags, depth, stencil,
					 dstx, dsty, width, height);
	si_blitter_end(sctx);
}

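/* The pipe_context::clear_texture hook: clear a box of one level by
 * unpacking the packed clear value and reusing the render-target or
 * depth/stencil clear paths.
 */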
static void si_clear_texture(struct pipe_context *pipe,
			     struct pipe_resource *tex,
			     unsigned level,
			     const struct pipe_box *box,
			     const void *data)
{
	struct pipe_screen *screen = pipe->screen;
	struct si_texture *stex = (struct si_texture*)tex;
	struct pipe_surface tmpl = {{0}};
	struct pipe_surface *sf;
	const struct util_format_description *desc =
		util_format_description(tex->format);

	tmpl.format = tex->format;
	tmpl.u.tex.first_layer = box->z;
	tmpl.u.tex.last_layer = box->z + box->depth - 1;
	tmpl.u.tex.level = level;
	sf = pipe->create_surface(pipe, tex, &tmpl);
	if (!sf)
		return;

	if (stex->is_depth) {
		unsigned clear;
		float depth;
		uint8_t stencil = 0;

		/* Depth is always present. */
		clear = PIPE_CLEAR_DEPTH;
		desc->unpack_z_float(&depth, 0, data, 0, 1, 1);

		if (stex->surface.has_stencil) {
			clear |= PIPE_CLEAR_STENCIL;
			desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
		}

		si_clear_depth_stencil(pipe, sf, clear, depth, stencil,
				       box->x, box->y,
				       box->width, box->height, false);
	} else {
		union pipe_color_union color;

		/* pipe_color_union requires the full vec4 representation. */
		if (util_format_is_pure_uint(tex->format))
			desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1);
		else if (util_format_is_pure_sint(tex->format))
			desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1);
		else
			desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1);

		if (screen->is_format_supported(screen, tex->format,
						tex->target, 0, 0,
						PIPE_BIND_RENDER_TARGET)) {
			si_clear_render_target(pipe, sf, &color,
					       box->x, box->y,
					       box->width, box->height, false);
		} else {
			/* Software fallback - just for R9G9B9E5_FLOAT */
			util_clear_render_target(pipe, sf, &color,
						 box->x, box->y,
						 box->width, box->height);
		}
	}
	pipe_surface_reference(&sf, NULL);
}

void si_init_clear_functions(struct si_context *sctx)
{
	sctx->b.clear = si_clear;
	sctx->b.clear_render_target = si_clear_render_target;
	sctx->b.clear_depth_stencil = si_clear_depth_stencil;
	sctx->b.clear_texture = si_clear_texture;
}