
Commit 07186b8
set material blend mode to alpha blend
Tarcontar committed Aug 20, 2019
1 parent abc79de commit 07186b8
Showing 2 changed files with 3 additions and 212 deletions.
io_mesh_w3d/utils_w3d.py (2 changes: 1 addition & 1 deletion)
@@ -11,7 +11,6 @@

from bpy_extras.node_shader_utils import PrincipledBSDFWrapper
from bpy_extras.image_utils import load_image

from bpy.props import FloatVectorProperty, StringProperty, PointerProperty

from io_mesh_w3d.w3d_io_binary import *
@@ -187,6 +186,7 @@ def rgb_to_vector(rgb):
def create_vert_material(mesh, vertMat):
mat = bpy.data.materials.new(mesh.header.meshName + "." + vertMat.vmName)
mat.use_nodes = True
mat.blend_method = 'BLEND'
principled = PrincipledBSDFWrapper(mat, is_readonly=False)
principled.base_color = rgb_to_vector(vertMat.vmInfo.diffuse)
principled.alpha = vertMat.vmInfo.opacity
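For context, here is a minimal standalone sketch of the material setup that the patched create_vert_material produces: a node-based material whose Principled BSDF alpha is actually rendered, because the blend method is switched to alpha blending. This is illustrative only; it assumes Blender 2.80+ and substitutes made-up values for the W3D vertex material data.

import bpy
from bpy_extras.node_shader_utils import PrincipledBSDFWrapper

# Equivalent of what create_vert_material builds, with hypothetical inputs.
mat = bpy.data.materials.new("ExampleMesh.ExampleVertMat")
mat.use_nodes = True
mat.blend_method = 'BLEND'  # the line this commit adds: enable alpha blending

principled = PrincipledBSDFWrapper(mat, is_readonly=False)
principled.base_color = (0.8, 0.8, 0.8)  # stand-in for rgb_to_vector(vertMat.vmInfo.diffuse)
principled.alpha = 0.5                   # stand-in for vertMat.vmInfo.opacity; now renders as transparent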
tests.py (213 changes: 2 additions & 211 deletions)
@@ -7,138 +7,25 @@
import sys
import time


def clean_tmp_folder():
# TODO: This cannot run when the tmp folder is open
# in a file browser or is in use. This is annoying.
if os.path.exists('./tests/tmp'):
# empty temp directory
shutil.rmtree('./tests/tmp',ignore_errors=True)

# create temp dir if not exists
try:
os.makedirs('./tests/tmp',exist_ok=True)
except:
pass

def _make_argparse():
parser = argparse.ArgumentParser(description="Runs the W3DMesh test suite")
test_selection = parser.add_argument_group("Test Selection And Control")
test_selection.add_argument("-f", "--filter",
help="Filter test files with a regular expression",
type=str)#[regex]
test_selection.add_argument("-s","--start-at",
help="Start in list of files to test at first matching a regular expression",
type=str)#[regex]
test_selection.add_argument("--exclude",
help="Exclude test files with a regular expression",
type=str)#[regex]
test_selection.add_argument("-c", "--continue",
help="Keep running after a test failure",
default=False,
action="store_true",
dest="keep_going")

output_control = parser.add_argument_group("Output Control")
output_control.add_argument("-q", "--quiet",
default=False,
help="Only output if tests pass or fail",
action="store_true")
output_control.add_argument("-p", "--print-fails",
default=False,
help="Like --quiet, but also prints the output of failed tests",
action="store_true")

blender_options = parser.add_argument_group("Blender Options")
blender_options.add_argument("--blender",
default="blender",# Use the blender in the system path
type=str,
help="Provide alternative path to Blender executable")
blender_options.add_argument("--force-blender-debug",
help="Turn on Blender's --debug flag",
action="store_true")
blender_options.add_argument("-n", "--no-factory-startup",
help="Run Blender with current prefs rather than factory prefs",
action="store_true")
return parser
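# (Editor's note, illustrative only, not part of the original file.)
# With the options defined above, a typical invocation of this runner might look like:
#   python tests.py --filter mesh --print-fails --blender /path/to/blender
# The flag names come from _make_argparse(); the filter value and the Blender path are made up.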

def main(argv=None)->int:
'''
Returns the exit code: 0 for success, anything else is an error
'''
exit_code = 0

"""
Rather than mess with fancy ways to pass back test results
we have a stupid simple solution: Print a special string
at the end and parse it here.
Unfortunately, the REGEX must be duplicated across both files
due to some problems with importing it from the test module
"""
TEST_RESULTS_REGEX = re.compile(r"RESULT: After (?P<testsRun>\d+) tests got (?P<errors>\d+) errors, (?P<failures>\d+) failures, and (?P<skipped>\d+) skipped")
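# (Editor's note, illustrative only.) A run that this regex accepts ends with a line such as:
#   RESULT: After 12 tests got 0 errors, 0 failures, and 1 skipped
# The counts here are invented; only the sentence shape matters to the regex.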

# Accumulated TestResult stats, reported at the end of everything

total_testsCompleted, total_errors, total_failures, total_skipped = (0,) * 4
timer_start = time.perf_counter()

clean_tmp_folder()
if argv is None:
argv = _make_argparse().parse_args(sys.argv[1:])
if argv.filter:
argv.filter = re.escape(argv.filter)
if argv.start_at:
argv.start_at = re.escape(argv.start_at)
if argv.exclude:
argv.exclude = re.escape(argv.exclude)

def printTestBeginning(text):
'''
Print the C-Style and Vim comment block start tokens
so that text editors can recognize places to automatically fold up the tests
'''

# Why the hex escapes? So we don't fold our own code!
print(("\x2F*=== " + text + " ").ljust(75, '=')+'\x7B\x7B\x7B')

def printTestEnd():
'''
Print the C-Style and Vim comment block end tokens
so that text editors can recognize places to automatically fold up the tests
'''
print(('=' *75)+"}}}*/")
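# (Editor's note, illustrative only.) printTestBeginning("Running file foo.test.py") prints a
# '/*=== Running file foo.test.py ' header padded to 75 columns with '=' and terminated by three
# opening braces; printTestEnd() prints 75 '=' characters followed by three closing braces and '*/'.
# Editors with marker or C-comment folding can then collapse each test's output block.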

def inFilter(filepath:str)->bool:
'''
Tests if filepath matches --filter and/or --exclude,
always returns False if --start-at hasn't been satisfied yet
'''
if argv.start_at is None:
inFilter.should_start_taking = True # type: bool
elif getattr(inFilter, "should_start_taking", None) is None:
inFilter.should_start_taking = False

if inFilter.should_start_taking is False:
inFilter.should_start_taking = bool(argv.start_at and re.search(argv.start_at, filepath))
if inFilter.should_start_taking is False:
# We still haven't found it!
return False

passes = True

if argv.filter is not None:
passes &= bool(re.search(argv.filter, filepath))

if argv.exclude is not None:
passes &= not re.search(argv.exclude, filepath)

return passes

exit_code = 0
for root, dirs, files in os.walk('./tests'):
filtered_files = list(filter(lambda file: file.endswith('.test.py') and
inFilter(os.path.join(root, file)),
files))
filtered_files = list(filter(lambda file: file.endswith('.test.py'), files))
if exit_code != 0:
break

@@ -149,102 +36,6 @@ def inFilter(filepath:str)->bool:
pyFile = os.path.join(root, pyFile)
blendFile = pyFile.replace('.py', '.blend')

if not (argv.quiet or argv.print_fails):
printTestBeginning("Running file " + pyFile)

blender_args = [
argv.blender,
'--addons',
'io_mesh_w3d',
'--factory-startup',
'-noaudio',
'-b'
]

if argv.no_factory_startup:
blender_args.remove('--factory-startup')

if os.path.exists(blendFile):
blender_args.append(blendFile)
else:
if not (argv.quiet or argv.print_fails):
print("WARNING: Blender file " + blendFile + " does not exist")
printTestEnd()

blender_args.extend(['--python', pyFile])

if argv.force_blender_debug:
blender_args.append('--debug')

# Small Hack!
# Blender stops parsing after '--', so we can append the test runner
# args and bridge the gap without anything fancy!
blender_args.extend(['--']+sys.argv[1:])

if (not argv.quiet and
(argv.force_blender_debug)):
# print the command used to execute the script
# to be able to easily re-run it manually to get better error output
print(' '.join(blender_args))

#Run Blender, normalize output line endings because Windows is dumb
out = subprocess.check_output(blender_args, stderr = subprocess.STDOUT, universal_newlines=True) # type: str
if not (argv.quiet or argv.print_fails):
print(out)

# TestResults from the current test
testsRun, errors, failures, skipped = (0,) * 4
try:
results = re.search(TEST_RESULTS_REGEX, out)
except:
# Oh goodie, more string matching!
# I'm sure this won't ever come back to bite us!
# If we're ever using assertRaises,
# hopefully we'll figure out something better! -Ted, 8/14/18
assert results is not None or "Traceback" in out, \
"Test runner must print correct results string at end or have suffered an unrecoverable error"
total_errors += 1
errors = 1
else:
testsRun, errors, failures, skipped = (
int(results.group('testsRun')),
int(results.group('errors')),
int(results.group('failures')),
int(results.group('skipped'))
)

total_testsCompleted += testsRun
total_errors += errors
total_failures += failures
total_skipped += skipped
finally:
if errors or failures:
if argv.print_fails:
printTestBeginning("Running file %s - FAILED" % (pyFile))
print(out)
printTestEnd()
else:
print('%s FAILED' % pyFile)

if not argv.keep_going:
exit_code = 1
else:
exit_code = 0
elif argv.quiet or argv.print_fails:
print('%s passed' % pyFile)

# THIS IS THE LAST THING TO PRINT BEFORE A TEST ENDS.
# It's a little easier to see the boundaries between test suites,
# given that there is a mess of print statements from Python, unittest, the XPlane2Blender logger,
# Blender, and more in there sometimes.
if not (argv.quiet or argv.print_fails):
printTestEnd()

# Final Result String Benefits
# - --continue concisely tells how many tests failed
# - Just enough more info for --quiet
# - No matter what useless noise Blender and unittest spit out, the
#   end of the log has the final answer
print((
"FINAL RESULTS: {total_testsCompleted} {test_str} completed,"
" {total_errors} errors,"
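The runner code shown above documents a simple contract with the Blender-side test modules: the runner's own flags arrive after '--', and each module must finish its output with the RESULT line that TEST_RESULTS_REGEX parses. Below is a minimal sketch of what a *.test.py module could do to satisfy that contract; the test case, verbosity, and argument handling are illustrative assumptions, not code from this repository.

import sys
import unittest

# Blender stops parsing at '--', so everything after it is passed through untouched
# and the runner's flags (--quiet, --print-fails, ...) are visible here if needed.
args = sys.argv[sys.argv.index('--') + 1:] if '--' in sys.argv else []

class ExampleTest(unittest.TestCase):  # hypothetical test case
    def test_something(self):
        self.assertTrue(True)

# Run the tests defined in this module without letting unittest call sys.exit().
result = unittest.main(argv=['tests'], exit=False, verbosity=1).result

# The exact wording matters: tests.py scans the captured output for this line.
print("RESULT: After %d tests got %d errors, %d failures, and %d skipped" % (
    result.testsRun, len(result.errors), len(result.failures), len(result.skipped)))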
