Merge branch 'master' into blender2.8

Campbell Barton 2017-08-19 21:54:05 +10:00
commit 2332051419
15 changed files with 368 additions and 96 deletions

View File

@ -216,7 +216,7 @@ ccl_device_noinline float3 indirect_primitive_emission(KernelGlobals *kg, Shader
{
/* multiple importance sampling, get triangle light pdf,
* and compute weight with respect to BSDF pdf */
float pdf = triangle_light_pdf(kg, sd->Ng, sd->I, t);
float pdf = triangle_light_pdf(kg, sd, t);
float mis_weight = power_heuristic(bsdf_pdf, pdf);
return L*mis_weight;
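For reference, power_heuristic() here is Veach's power heuristic with exponent two; a standalone Python sketch of the weighting (illustrative only, not the kernel implementation):

def power_heuristic(a, b):
    # Veach power heuristic (beta = 2): a^2 / (a^2 + b^2)
    return (a * a) / (a * a + b * b)

# e.g. a BSDF pdf of 0.8 against a light pdf of 0.2 keeps ~94% of the weight
mis_weight = power_heuristic(0.8, 0.2)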

View File

@ -763,62 +763,265 @@ ccl_device bool lamp_light_eval(KernelGlobals *kg, int lamp, float3 P, float3 D,
/* Triangle Light */
ccl_device void object_transform_light_sample(KernelGlobals *kg, LightSample *ls, int object, float time)
/* returns true if the triangle has motion blur or an instancing transform applied */
ccl_device_inline bool triangle_world_space_vertices(KernelGlobals *kg, int object, int prim, float time, float3 V[3])
{
bool has_motion = false;
const int object_flag = kernel_tex_fetch(__object_flag, object);
if(object_flag & SD_OBJECT_HAS_VERTEX_MOTION && time >= 0.0f) {
motion_triangle_vertices(kg, object, prim, time, V);
has_motion = true;
} else {
triangle_vertices(kg, prim, V);
}
#ifdef __INSTANCING__
/* instance transform */
if(!(kernel_tex_fetch(__object_flag, object) & SD_OBJECT_TRANSFORM_APPLIED)) {
if(!(object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
# ifdef __OBJECT_MOTION__
Transform itfm;
Transform tfm = object_fetch_transform_motion_test(kg, object, time, &itfm);
Transform tfm = object_fetch_transform_motion_test(kg, object, time, NULL);
# else
Transform tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);
# endif
ls->P = transform_point(&tfm, ls->P);
ls->Ng = normalize(transform_direction(&tfm, ls->Ng));
V[0] = transform_point(&tfm, V[0]);
V[1] = transform_point(&tfm, V[1]);
V[2] = transform_point(&tfm, V[2]);
has_motion = true;
}
#endif
return has_motion;
}
ccl_device void triangle_light_sample(KernelGlobals *kg, int prim, int object,
float randu, float randv, float time, LightSample *ls)
{
float u, v;
/* compute random point in triangle */
randu = sqrtf(randu);
u = 1.0f - randu;
v = randv*randu;
/* triangle, so get position, normal, shader */
triangle_point_normal(kg, object, prim, u, v, &ls->P, &ls->Ng, &ls->shader);
ls->object = object;
ls->prim = prim;
ls->lamp = LAMP_NONE;
ls->shader |= SHADER_USE_MIS;
ls->t = 0.0f;
ls->u = u;
ls->v = v;
ls->type = LIGHT_TRIANGLE;
ls->eval_fac = 1.0f;
object_transform_light_sample(kg, ls, object, time);
}
ccl_device float triangle_light_pdf(KernelGlobals *kg,
const float3 Ng, const float3 I, float t)
ccl_device_inline float triangle_light_pdf_area(KernelGlobals *kg, const float3 Ng, const float3 I, float t)
{
float pdf = kernel_data.integrator.pdf_triangles;
float cos_pi = fabsf(dot(Ng, I));
if(cos_pi == 0.0f)
return 0.0f;
return t*t*pdf/cos_pi;
}
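triangle_light_pdf_area() turns the constant area-measure pdf (kernel_data.integrator.pdf_triangles) into a solid-angle pdf via the usual domega = dA*cos(theta)/t^2 Jacobian; a minimal Python sketch of that conversion (names are illustrative):

def pdf_area_to_solid_angle(pdf_area, t, cos_theta):
    # dA -> domega Jacobian: pdf_sa = pdf_area * t^2 / |cos(theta)|
    cos_theta = abs(cos_theta)
    if cos_theta == 0.0:
        return 0.0
    return pdf_area * t * t / cos_theta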
ccl_device_forceinline float triangle_light_pdf(KernelGlobals *kg, ShaderData *sd, float t)
{
/* A naive heuristic to decide between costly solid angle sampling
* and simple area sampling, comparing the distance to the triangle plane
* to the length of the edges of the triangle.
* Looking at two edges of the triangle should be a sufficient heuristic,
* the third edge can't possibly be longer than the sum of the other two. */
float3 V[3];
bool has_motion = triangle_world_space_vertices(kg, sd->object, sd->prim, sd->time, V);
const float3 e0 = V[1] - V[0];
const float3 e1 = V[2] - V[0];
const float3 e2 = V[2] - V[1];
const float longest_edge_squared = max(len_squared(e0), max(len_squared(e1), len_squared(e2)));
const float3 N = cross(e0, e1);
const float distance_to_plane = fabsf(dot(N, sd->I * t))/dot(N, N);
if(longest_edge_squared > distance_to_plane*distance_to_plane) {
/* sd contains the point on the light source
* calculate Px, the point that we're shading */
const float3 Px = sd->P + sd->I * t;
const float3 v0_p = V[0] - Px;
const float3 v1_p = V[1] - Px;
const float3 v2_p = V[2] - Px;
const float3 u01 = safe_normalize(cross(v0_p, v1_p));
const float3 u02 = safe_normalize(cross(v0_p, v2_p));
const float3 u12 = safe_normalize(cross(v1_p, v2_p));
const float alpha = fast_acosf(dot(u02, u01));
const float beta = fast_acosf(-dot(u01, u12));
const float gamma = fast_acosf(dot(u02, u12));
const float solid_angle = alpha + beta + gamma - M_PI_F;
/* pdf_triangles is calculated over triangle area, but we're not sampling over its area */
if(UNLIKELY(solid_angle == 0.0f)) {
return 0.0f;
} else {
float area = 1.0f;
if(has_motion) {
/* get the center frame vertices, this is what the PDF was calculated from */
triangle_world_space_vertices(kg, sd->object, sd->prim, -1.0f, V);
area = triangle_area(V[0], V[1], V[2]);
} else {
area = 0.5f * len(N);
}
const float pdf = area * kernel_data.integrator.pdf_triangles;
return pdf / solid_angle;
}
}
else {
float pdf = triangle_light_pdf_area(kg, sd->Ng, sd->I, t);
if(has_motion) {
const float area = 0.5f * len(N);
if(UNLIKELY(area == 0.0f)) {
return 0.0f;
}
/* scale the PDF.
* area = the area the sample was taken from
* area_pre = the area from which pdf_triangles was calculated */
triangle_world_space_vertices(kg, sd->object, sd->prim, -1.0f, V);
const float area_pre = triangle_area(V[0], V[1], V[2]);
pdf = pdf * area_pre / area;
}
return pdf;
}
}
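The solid-angle branch above relies on Girard's theorem: the area of the projected unit spherical triangle equals alpha + beta + gamma - pi, and the pdf becomes (triangle area * pdf_triangles) / solid_angle. A self-contained Python sketch of that solid-angle computation, assuming plain 3-tuples for vectors:

import math

def sub(a, b):   return (a[0] - b[0], a[1] - b[1], a[2] - b[2])
def dot(a, b):   return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
def cross(a, b): return (a[1]*b[2] - a[2]*b[1],
                         a[2]*b[0] - a[0]*b[2],
                         a[0]*b[1] - a[1]*b[0])

def normalize(a):
    l = math.sqrt(dot(a, a))
    return (a[0]/l, a[1]/l, a[2]/l) if l > 0.0 else (0.0, 0.0, 0.0)

def spherical_triangle_solid_angle(P, v0, v1, v2):
    # normals of the planes through P and each pair of triangle vertices
    u01 = normalize(cross(sub(v0, P), sub(v1, P)))
    u02 = normalize(cross(sub(v0, P), sub(v2, P)))
    u12 = normalize(cross(sub(v1, P), sub(v2, P)))
    clamp = lambda x: max(-1.0, min(1.0, x))
    alpha = math.acos(clamp(dot(u02, u01)))
    beta  = math.acos(clamp(-dot(u01, u12)))
    gamma = math.acos(clamp(dot(u02, u12)))
    # Girard: spherical excess = sum of dihedral angles minus pi
    return alpha + beta + gamma - math.pi

# sanity check: the triangle (1,0,0), (0,1,0), (0,0,1) seen from the origin
# covers one octant of the sphere, i.e. a solid angle of pi/2
assert abs(spherical_triangle_solid_angle((0,0,0), (1,0,0), (0,1,0), (0,0,1)) - math.pi/2) < 1e-6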
ccl_device_forceinline void triangle_light_sample(KernelGlobals *kg, int prim, int object,
float randu, float randv, float time, LightSample *ls, const float3 P)
{
/* A naive heuristic to decide between costly solid angle sampling
* and simple area sampling, comparing the distance to the triangle plane
* to the length of the edges of the triangle.
* Looking at two edges of the triangle should be a sufficient heuristic,
* the third edge can't possibly be longer than the sum of the other two. */
float3 V[3];
bool has_motion = triangle_world_space_vertices(kg, object, prim, time, V);
const float3 e0 = V[1] - V[0];
const float3 e1 = V[2] - V[0];
const float3 e2 = V[2] - V[1];
const float longest_edge_squared = max(len_squared(e0), max(len_squared(e1), len_squared(e2)));
const float3 N0 = cross(e0, e1);
float Nl = 0.0f;
ls->Ng = safe_normalize_len(N0, &Nl);
float area = 0.5f * Nl;
/* flip normal if necessary */
const int object_flag = kernel_tex_fetch(__object_flag, object);
if(!(object_flag & SD_OBJECT_NEGATIVE_SCALE_APPLIED)) {
ls->Ng = -ls->Ng;
}
ls->eval_fac = 1.0f;
ls->shader = kernel_tex_fetch(__tri_shader, prim);
ls->object = object;
ls->prim = prim;
ls->lamp = LAMP_NONE;
ls->shader |= SHADER_USE_MIS;
ls->type = LIGHT_TRIANGLE;
float distance_to_plane = fabsf(dot(N0, V[0] - P)/dot(N0, N0));
if(longest_edge_squared > distance_to_plane*distance_to_plane) {
/* see James Arvo, "Stratified Sampling of Spherical Triangles"
* http://www.graphics.cornell.edu/pubs/1995/Arv95c.pdf */
/* project the triangle to the unit sphere
* and calculate its edges and angles */
const float3 v0_p = V[0] - P;
const float3 v1_p = V[1] - P;
const float3 v2_p = V[2] - P;
const float3 u01 = safe_normalize(cross(v0_p, v1_p));
const float3 u02 = safe_normalize(cross(v0_p, v2_p));
const float3 u12 = safe_normalize(cross(v1_p, v2_p));
const float3 A = safe_normalize(v0_p);
const float3 B = safe_normalize(v1_p);
const float3 C = safe_normalize(v2_p);
const float cos_alpha = dot(u02, u01);
const float cos_beta = -dot(u01, u12);
const float cos_gamma = dot(u02, u12);
/* calculate dihedral angles */
const float alpha = fast_acosf(cos_alpha);
const float beta = fast_acosf(cos_beta);
const float gamma = fast_acosf(cos_gamma);
/* the area of the unit spherical triangle = solid angle */
const float solid_angle = alpha + beta + gamma - M_PI_F;
/* precompute a few things
* these could be re-used to take several samples
* as they are independent of randu/randv */
const float cos_c = dot(A, B);
const float sin_alpha = fast_sinf(alpha);
const float product = sin_alpha * cos_c;
/* Select a random sub-area of the spherical triangle
* and calculate the third vertex C_ of that new triangle */
const float phi = randu * solid_angle - alpha;
float s, t;
fast_sincosf(phi, &s, &t);
const float u = t - cos_alpha;
const float v = s + product;
const float3 U = safe_normalize(C - dot(C, A) * A);
float q = 1.0f;
const float det = ((v * s + u * t) * sin_alpha);
if(det != 0.0f) {
q = ((v * t - u * s) * cos_alpha - v) / det;
}
const float temp = max(1.0f - q*q, 0.0f);
const float3 C_ = safe_normalize(q * A + sqrtf(temp) * U);
/* Finally, select a random point along the edge of the new triangle.
* That point on the spherical triangle is the sampled ray direction */
const float z = 1.0f - randv * (1.0f - dot(C_, B));
ls->D = z * B + safe_sqrtf(1.0f - z*z) * safe_normalize(C_ - dot(C_, B) * B);
/* calculate intersection with the planar triangle
* mostly standard ray/tri intersection, with u/v clamped */
const float3 s1 = cross(ls->D, e1);
const float divisor = dot(s1, e0);
if(UNLIKELY(divisor == 0.0f)) {
ls->pdf = 0.0f;
return;
}
const float inv_divisor = 1.0f/divisor;
const float3 d = P - V[0];
ls->u = clamp(dot(d, s1)*inv_divisor, 0.0f, 1.0f);
const float3 s2 = cross(d, e0);
ls->v = clamp(dot(ls->D, s2)*inv_divisor, 0.0f, 1.0f);
ls->t = dot(e1, s2)*inv_divisor;
ls->P = P + ls->D * ls->t;
/* pdf_triangles is calculated over triangle area, but we're sampling over solid angle */
if(UNLIKELY(solid_angle == 0.0f)) {
ls->pdf = 0.0f;
} else {
if(has_motion) {
/* get the center frame vertices, this is what the PDF was calculated from */
triangle_world_space_vertices(kg, object, prim, -1.0f, V);
area = triangle_area(V[0], V[1], V[2]);
}
const float pdf = area * kernel_data.integrator.pdf_triangles;
ls->pdf = pdf / solid_angle;
}
}
else {
/* compute random point in triangle */
randu = sqrtf(randu);
const float u = 1.0f - randu;
const float v = randv*randu;
const float t = 1.0f - u - v;
ls->P = u * V[0] + v * V[1] + t * V[2];
/* compute incoming direction, distance and pdf */
ls->D = normalize_len(ls->P - P, &ls->t);
ls->pdf = triangle_light_pdf_area(kg, ls->Ng, -ls->D, ls->t);
if(has_motion && area != 0.0f) {
/* scale the PDF.
* area = the area the sample was taken from
* area_pre = the area from which pdf_triangles was calculated */
triangle_world_space_vertices(kg, object, prim, -1.0f, V);
const float area_pre = triangle_area(V[0], V[1], V[2]);
ls->pdf = ls->pdf * area_pre / area;
}
ls->u = u;
ls->v = v;
}
}
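The fallback branch keeps the classic uniform-area sampling of a triangle, warping the unit square with a square root so the barycentric coordinates are uniform over the area; a minimal Python sketch:

import math

def sample_triangle_uniform(v0, v1, v2, randu, randv):
    # sqrt warp: (randu, randv) uniform on [0,1)^2 -> uniform point on the triangle
    r = math.sqrt(randu)
    u = 1.0 - r
    v = randv * r
    w = 1.0 - u - v
    return tuple(u*a + v*b + w*c for a, b, c in zip(v0, v1, v2))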
/* Light Distribution */
ccl_device int light_distribution_sample(KernelGlobals *kg, float randt)
@ -876,10 +1079,7 @@ ccl_device_noinline bool light_sample(KernelGlobals *kg,
int object = __float_as_int(l.w);
int shader_flag = __float_as_int(l.z);
triangle_light_sample(kg, prim, object, randu, randv, time, ls);
/* compute incoming direction, distance and pdf */
ls->D = normalize_len(ls->P - P, &ls->t);
ls->pdf = triangle_light_pdf(kg, ls->Ng, -ls->D, ls->t);
triangle_light_sample(kg, prim, object, randu, randv, time, ls, P);
ls->shader |= shader_flag;
return (ls->pdf > 0.0f);
}

View File

@ -232,10 +232,6 @@ bool LightManager::object_usable_as_light(Object *object) {
if(!(object->visibility & (PATH_RAY_DIFFUSE|PATH_RAY_GLOSSY|PATH_RAY_TRANSMIT))) {
return false;
}
/* Skip motion blurred deforming meshes, not supported yet. */
if(mesh->has_motion_blur()) {
return false;
}
/* Skip if we have no emission shaders. */
/* TODO(sergey): Ideally we want to avoid such a duplicated loop, since it'll
* iterate all mesh shaders twice (when counting and when calculating

View File

@ -367,6 +367,13 @@ void ObjectManager::device_update_object_transform(UpdateObejctTransformState *s
/* OBJECT_PROPERTIES */
objects[offset+8] = make_float4(surface_area, pass_id, random_number, __int_as_float(particle_index));
if(mesh->use_motion_blur) {
state->have_motion = true;
}
if(mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION)) {
flag |= SD_OBJECT_HAS_VERTEX_MOTION;
}
if(state->need_motion == Scene::MOTION_PASS) {
/* Motion transformations, in world/object space depending on whether the mesh
* comes with deformed position in object space, or if we transform
@ -387,9 +394,6 @@ void ObjectManager::device_update_object_transform(UpdateObejctTransformState *s
mtfm.pre = mtfm.pre * itfm;
mtfm.post = mtfm.post * itfm;
}
else {
flag |= SD_OBJECT_HAS_VERTEX_MOTION;
}
memcpy(&objects_vector[object_index*OBJECT_VECTOR_SIZE+0], &mtfm.pre, sizeof(float4)*3);
memcpy(&objects_vector[object_index*OBJECT_VECTOR_SIZE+3], &mtfm.post, sizeof(float4)*3);
@ -408,10 +412,6 @@ void ObjectManager::device_update_object_transform(UpdateObejctTransformState *s
}
#endif
if(mesh->use_motion_blur) {
state->have_motion = true;
}
/* Dupli object coords and motion info. */
int totalsteps = mesh->motion_steps;
int numsteps = (totalsteps - 1)/2;

View File

@ -165,15 +165,17 @@ void TileManager::set_samples(int num_samples_)
uint64_t pixel_samples = 0;
/* While rendering in the viewport, the initial preview resolution is increased to the native resolution
* before the actual rendering begins. Therefore, additional pixel samples will be rendered. */
int divider = get_divider(params.width, params.height, start_resolution) / 2;
while(divider > 1) {
int divider = max(get_divider(params.width, params.height, start_resolution) / 2, pixel_size);
while(divider > pixel_size) {
int image_w = max(1, params.width/divider);
int image_h = max(1, params.height/divider);
pixel_samples += image_w * image_h;
divider >>= 1;
}
state.total_pixel_samples = pixel_samples + (uint64_t)get_num_effective_samples() * params.width*params.height;
int image_w = max(1, params.width/divider);
int image_h = max(1, params.height/divider);
state.total_pixel_samples = pixel_samples + (uint64_t)get_num_effective_samples() * image_w*image_h;
if(schedule_denoising) {
state.total_pixel_samples += params.width*params.height;
}
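The change makes the preview-resolution loop stop at pixel_size instead of 1 and uses the divided image size for the final full-sample pass; an illustrative Python sketch of the same accounting (function and argument names are assumptions, not the Cycles API):

def estimate_total_pixel_samples(width, height, start_divider, pixel_size, num_samples):
    divider = max(start_divider // 2, pixel_size)
    pixel_samples = 0
    # one sample per pixel for every intermediate preview resolution
    while divider > pixel_size:
        pixel_samples += max(1, width // divider) * max(1, height // divider)
        divider >>= 1
    # full sample count at the final resolution (divider == pixel_size here)
    image_w = max(1, width // divider)
    image_h = max(1, height // divider)
    return pixel_samples + num_samples * image_w * image_h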

View File

@ -330,15 +330,22 @@ template<class A, class B> A lerp(const A& a, const A& b, const B& t)
return (A)(a * ((B)1 - t) + b * t);
}
#endif /* __KERNEL_OPENCL__ */
/* Triangle */
#ifndef __KERNEL_OPENCL__
ccl_device_inline float triangle_area(const float3& v1,
const float3& v2,
const float3& v3)
#else
ccl_device_inline float triangle_area(const float3 v1,
const float3 v2,
const float3 v3)
#endif
{
return len(cross(v3 - v2, v1 - v2))*0.5f;
}
#endif /* __KERNEL_OPENCL__ */
/* Orthonormal vectors */

View File

@ -137,6 +137,9 @@ class ANIM_OT_keying_set_export(Operator):
break
else:
self.report({'WARN'}, "Could not find scene using Compositor Node Tree - %s" % (ksp.id))
elif ksp.id.bl_rna.name == "Key":
# "keys" conflicts with a Python keyword, hence the simple solution won't work
id_bpy_path = "bpy.data.shape_keys[\"%s\"]" % (ksp.id.name)
else:
idtype_list = ksp.id.bl_rna.name.lower() + "s"
id_bpy_path = "bpy.data.%s[\"%s\"]" % (idtype_list, ksp.id.name)

View File

@ -1395,7 +1395,7 @@ static PyObject *pyrna_enum_to_py(PointerRNA *ptr, PropertyRNA *prop, int val)
}
else {
EnumPropertyItem *enum_item;
bool free = false;
bool free;
/* don't throw error here, can't trust blender 100% to give the
* right values, python code should not generate error for that */
@ -1404,6 +1404,9 @@ static PyObject *pyrna_enum_to_py(PointerRNA *ptr, PropertyRNA *prop, int val)
ret = PyUnicode_FromString(enum_item->identifier);
}
else {
if (free) {
MEM_freeN(enum_item);
}
RNA_property_enum_items(NULL, ptr, prop, &enum_item, NULL, &free);
/* Do not print warning in case of DummyRNA_NULL_items, this one will never match any value... */

View File

@ -144,36 +144,6 @@ static PyObject *pygpu_offscreen_unbind(BPy_GPUOffScreen *self, PyObject *args,
Py_RETURN_NONE;
}
/**
* Use with PyArg_ParseTuple's "O&" formatting.
*/
static int UNUSED_FUNCTION(pygpu_offscreen_check_matrix)(PyObject *o, void *p)
{
MatrixObject **pymat_p = p;
MatrixObject *pymat = (MatrixObject *)o;
if (!MatrixObject_Check(pymat)) {
PyErr_Format(PyExc_TypeError,
"expected a mathutils.Matrix, not a %.200s",
Py_TYPE(o)->tp_name);
return 0;
}
if (BaseMath_ReadCallback(pymat) == -1) {
return 0;
}
if ((pymat->num_col != 4) ||
(pymat->num_row != 4))
{
PyErr_SetString(PyExc_ValueError, "matrix must be 4x4");
return 0;
}
*pymat_p = pymat;
return 1;
}
PyDoc_STRVAR(pygpu_offscreen_draw_view3d_doc,
"draw_view3d(scene, view3d, region, modelview_matrix, projection_matrix)\n"
"\n"
@ -212,8 +182,8 @@ static PyObject *pygpu_offscreen_draw_view3d(BPy_GPUOffScreen *self, PyObject *a
if (!PyArg_ParseTupleAndKeywords(
args, kwds, "OOOOO&O&:draw_view3d", (char **)(kwlist),
&py_scene, &py_scene_layer, &py_view3d, &py_region,
pygpu_offscreen_check_matrix, &py_mat_projection,
pygpu_offscreen_check_matrix, &py_mat_modelview) ||
Matrix_Parse4x4, &py_mat_projection,
Matrix_Parse4x4, &py_mat_modelview) ||
(!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) ||
!(sl = PyC_RNA_AsPointer(py_scene_layer, "SceneLayer")) ||
!(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||

View File

@ -2914,6 +2914,73 @@ PyObject *Matrix_CreatePyObject_cb(PyObject *cb_user,
return (PyObject *) self;
}
/**
* Use with PyArg_ParseTuple's "O&" formatting.
*/
static bool Matrix_ParseCheck(MatrixObject *pymat)
{
if (!MatrixObject_Check(pymat)) {
PyErr_Format(PyExc_TypeError,
"expected a mathutils.Matrix, not a %.200s",
Py_TYPE(pymat)->tp_name);
return 0;
}
/* sets error */
if (BaseMath_ReadCallback(pymat) == -1) {
return 0;
}
return 1;
}
int Matrix_ParseAny(PyObject *o, void *p)
{
MatrixObject **pymat_p = p;
MatrixObject *pymat = (MatrixObject *)o;
if (!Matrix_ParseCheck(pymat)) {
return 0;
}
*pymat_p = pymat;
return 1;
}
int Matrix_Parse3x3(PyObject *o, void *p)
{
MatrixObject **pymat_p = p;
MatrixObject *pymat = (MatrixObject *)o;
if (!Matrix_ParseCheck(pymat)) {
return 0;
}
if ((pymat->num_col != 3) ||
(pymat->num_row != 3))
{
PyErr_SetString(PyExc_ValueError, "matrix must be 3x3");
return 0;
}
*pymat_p = pymat;
return 1;
}
int Matrix_Parse4x4(PyObject *o, void *p)
{
MatrixObject **pymat_p = p;
MatrixObject *pymat = (MatrixObject *)o;
if (!Matrix_ParseCheck(pymat)) {
return 0;
}
if ((pymat->num_col != 4) ||
(pymat->num_row != 4))
{
PyErr_SetString(PyExc_ValueError, "matrix must be 4x4");
return 0;
}
*pymat_p = pymat;
return 1;
}
/* ----------------------------------------------------------------------------
* special type for alternate access */

View File

@ -77,6 +77,11 @@ PyObject *Matrix_CreatePyObject_cb(
unsigned char cb_type, unsigned char cb_subtype
) ATTR_WARN_UNUSED_RESULT;
/* PyArg_ParseTuple's "O&" formatting helpers. */
int Matrix_ParseAny(PyObject *o, void *p);
int Matrix_Parse3x3(PyObject *o, void *p);
int Matrix_Parse4x4(PyObject *o, void *p);
extern unsigned char mathutils_matrix_row_cb_index; /* default */
extern unsigned char mathutils_matrix_col_cb_index;
extern unsigned char mathutils_matrix_translation_cb_index;

View File

@ -114,6 +114,7 @@ void Filter_dealloc(PyFilter *self)
delete self->m_filter;
self->m_filter = NULL;
}
Py_TYPE((PyObject *)self)->tp_free((PyObject *)self);
}

View File

@ -427,6 +427,7 @@ void Image_dealloc(PyImage *self)
delete self->m_image;
self->m_image = NULL;
}
Py_TYPE((PyObject *)self)->tp_free((PyObject *)self);
}
// get image data

View File

@ -539,6 +539,7 @@ if(WITH_CYCLES)
add_cycles_render_test(image_data_types)
add_cycles_render_test(image_mapping)
add_cycles_render_test(image_texture_limit)
add_cycles_render_test(light)
add_cycles_render_test(mblur)
add_cycles_render_test(reports)
add_cycles_render_test(render)

View File

@ -114,16 +114,27 @@ def test_get_name(filepath):
def test_get_images(filepath):
testname = test_get_name(filepath)
dirpath = os.path.dirname(filepath)
ref_dirpath = os.path.join(dirpath, "reference_renders")
old_dirpath = os.path.join(dirpath, "reference_renders")
old_img = os.path.join(old_dirpath, testname + ".png")
ref_dirpath = os.path.join(OUTDIR, os.path.basename(dirpath), "ref")
ref_img = os.path.join(ref_dirpath, testname + ".png")
if not os.path.exists(ref_dirpath):
os.makedirs(ref_dirpath)
if os.path.exists(old_img):
shutil.copy(old_img, ref_img)
new_dirpath = os.path.join(OUTDIR, os.path.basename(dirpath))
if not os.path.exists(new_dirpath):
os.makedirs(new_dirpath)
new_img = os.path.join(new_dirpath, testname + ".png")
diff_dirpath = os.path.join(OUTDIR, os.path.basename(dirpath), "diff")
if not os.path.exists(diff_dirpath):
os.makedirs(diff_dirpath)
diff_img = os.path.join(diff_dirpath, testname + ".diff.png")
return ref_img, new_img, diff_img
@ -162,7 +173,7 @@ class Report:
<head>
<title>Cycles Test Report</title>
<style>
img {{ image-rendering: pixelated; width: 256; background-color: #000; }}
img {{ image-rendering: pixelated; width: 256px; background-color: #000; }}
img.render {{
background-color: #fff;
background-image:
@ -182,7 +193,7 @@ class Report:
background-position:0 0, 25px 0, 25px -25px, 0px 25px;
}}
table td:first-child {{ width: 100%; }}
table td:first-child {{ width: 256px; }}
</style>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css">
</head>
@ -210,17 +221,22 @@ class Report:
print_message("Report saved to: " + pathlib.Path(filepath).as_uri())
def relative_url(self, filepath):
relpath = os.path.relpath(filepath, OUTDIR)
return pathlib.Path(relpath).as_posix()
def add_test(self, filepath, error):
name = test_get_name(filepath)
name = name.replace('_', ' ')
ref_img, new_img, diff_img = test_get_images(filepath)
status = error if error else ""
style = """ style="background-color: #f99;" """ if error else ""
new_url = pathlib.Path(new_img).as_uri()
ref_url = pathlib.Path(ref_img).as_uri()
diff_url = pathlib.Path(diff_img).as_uri()
new_url = self.relative_url(new_img)
ref_url = self.relative_url(ref_img)
diff_url = self.relative_url(diff_img)
test_html = """
<tr{}>