mirror of
https://github.com/ton-blockchain/ton
synced 2025-03-09 15:40:10 +00:00
[FunC] Enrich and refactor testing framework, add negative tests
* fully refactor run_tests.py, make it extensible for the future * an ability to write @compilation_should_fail tests * an ability to launch run_tests.py for a single .fc file * keep run_tests.js in sync with run_tests.py * extract legacy_tests names/hashes to a separate file shared between legacy_tester.py and legacy_tester.js
This commit is contained in:
parent
0bc6305f96
commit
a5d2a1003f
9 changed files with 615 additions and 203 deletions
|
@ -1,27 +1,68 @@
|
|||
const fs = require('fs/promises');
|
||||
// Usage: `node legacy_tests.js` from current dir, providing some env (see getenv() calls).
|
||||
// This is a JS version of legacy_tester.py to test FunC compiled to WASM.
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path')
|
||||
const process = require('process');
|
||||
const { compileWasm, compileFile } = require('./wasm_tests_common');
|
||||
|
||||
|
||||
/**
 * Reads an environment variable; falls back to `def`, or aborts when the
 * variable is mandatory (no default given).
 * @return {string}
 */
function getenv(name, def = null) {
    const value = process.env[name]
    if (value !== undefined)
        return value
    if (def !== null)
        return def
    console.log(`Environment variable ${name} is not set`)
    process.exit(1)
}
|
||||
|
||||
const FUNCFIFTLIB_MODULE = getenv('FUNCFIFTLIB_MODULE')
|
||||
const FUNCFIFTLIB_WASM = getenv('FUNCFIFTLIB_WASM')
|
||||
const TESTS_DIR = "legacy_tests"
|
||||
|
||||
/**
 * Parses legacy_tests.jsonl: each `["filename", hash]` line becomes one entry.
 * Lines starting with // (comments) simply don't match the regexp and are skipped.
 * @return {{filename: string, code_hash: BigInt}[]}
 */
function load_legacy_tests_list(jsonl_filename) {
    const text = fs.readFileSync(jsonl_filename).toString()
    const parsed = []
    for (const m of text.matchAll(/^\[\s*"(.*?)"\s*,\s*(.*?)\s*]/gms)) {
        parsed.push({ filename: m[1].trim(), code_hash: BigInt(m[2]) })
    }
    return parsed
}
|
||||
|
||||
/**
 * Compiles every legacy contract with the WASM build of FunC and checks that
 * the resulting code-cell hash matches the expected constant from
 * legacy_tests.jsonl (mirrors legacy_tester.py).
 * @throws {Error} on the first compilation failure or hash mismatch.
 */
async function main() {
    const tests = load_legacy_tests_list('legacy_tests.jsonl')

    for (let ti = 0; ti < tests.length; ++ti) {
        const {filename: filename_rel, code_hash} = tests[ti]
        const filename = path.join(TESTS_DIR, filename_rel)
        console.log(`Running test ${ti + 1}/${tests.length}: ${filename_rel}`)

        // storage-provider is skipped in the Python tester too — keep behavior in sync.
        if (filename.includes('storage-provider')) {
            console.log("  Skip");
            continue;
        }

        const wasmModule = await compileWasm(FUNCFIFTLIB_MODULE, FUNCFIFTLIB_WASM)
        const response = compileFile(wasmModule, filename);

        if (response.status !== 'ok') {
            console.error(response);
            // NOTE: the message now interpolates the actual file name
            // (was a garbled placeholder).
            throw new Error(`Could not compile ${filename_rel}`);
        }

        // The compiler reports the hash as hex; the expected value is a decimal BigInt.
        if (BigInt('0x' + response.codeHashHex) !== code_hash) {
            throw new Error(`Code hash is different for ${filename_rel}`);
        }

        console.log('  OK  ');
    }

    console.log(`Done ${tests.length}`)
}

main().catch(console.error)
|
||||
|
|
|
@ -1,5 +1,13 @@
|
|||
# Usage: `legacy_tests.py` from current dir, providing some env (see getenv() calls).
|
||||
# Unlike run_tests.py, it launches tests from legacy_tests/ folder (which are real-world contracts)
|
||||
# and checks that code hashes are expected (that contracts are compiled exactly the same way).
|
||||
# In other words, it doesn't execute TVM, it just compiles fift to acquire a contract hash.
|
||||
# In the future, we may merge these tests with regular ones (when the testing framework becomes richer).
|
||||
# Note, that there is also legacy_tester.js to test FunC compiled to WASM.
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
@ -7,39 +15,6 @@ import shutil
|
|||
|
||||
add_pragmas = [] #["allow-post-modification", "compute-asm-ltr"];
|
||||
|
||||
tests = [
|
||||
# note, that deployed version of elector,config and multisig differ since it is compilled with func-0.1.0.
|
||||
# Newer compillators optimize arithmetic and logic expression that can be calculated at the compile time
|
||||
["elector/elector-code.fc", 115226404411715505328583639896096915745686314074575650766750648324043316883483],
|
||||
["config/config-code.fc", 10913070768607625342121305745084703121685937915388357634624451844356456145601],
|
||||
["eth-bridge-multisig/multisig-code.fc", 101509909129354488841890823627011033360100627957439967918234053299675481277954],
|
||||
|
||||
["bsc-bridge-collector/votes-collector.fc", 62190447221288642706570413295807615918589884489514159926097051017036969900417],
|
||||
["uni-lock-wallet/uni-lockup-wallet.fc", 61959738324779104851267145467044677651344601417998258530238254441977103654381],
|
||||
["nft-collection/nft-collection-editable.fc", 45561997735512210616567774035540357815786262097548276229169737015839077731274],
|
||||
["dns-collection/nft-collection.fc", 107999822699841936063083742021519765435859194241091312445235370766165379261859],
|
||||
|
||||
|
||||
# note, that deployed version of tele-nft-item differs since it is compilled with func-0.3.0.
|
||||
# After introducing of try/catch construction, c2 register is not always the default one.
|
||||
# Thus it is necessary to save it upon jumps, differences of deployed and below compilled is that
|
||||
# "c2 SAVE" is added to the beginning of recv_internal. It does not change behavior.
|
||||
["tele-nft-item/nft-item.fc", 69777543125381987786450436977742010705076866061362104025338034583422166453344],
|
||||
|
||||
["storage/storage-contract.fc", 91377830060355733016937375216020277778264560226873154627574229667513068328151],
|
||||
["storage/storage-provider.fc", 13618336676213331164384407184540461509022654507176709588621016553953760588122],
|
||||
["nominator-pool/pool.fc", 69767057279163099864792356875696330339149706521019810113334238732928422055375],
|
||||
["jetton-minter/jetton-minter.fc", 9028309926287301331466371999814928201427184114165428257502393474125007156494],
|
||||
["gg-marketplace/nft-marketplace-v2.fc", 92199806964112524639740773542356508485601908152150843819273107618799016205930],
|
||||
["jetton-wallet/jetton-wallet.fc", 86251125787443633057458168028617933212663498001665054651523310772884328206542],
|
||||
["whales-nominators/nominators.fc", 8941364499854379927692172316865293429893094891593442801401542636695127885153],
|
||||
|
||||
|
||||
["tact-examples/treasure_Treasure.code.fc", 13962538639825790677138656603323869918938565499584297120566680287245364723897],
|
||||
["tact-examples/jetton_SampleJetton.code.fc", 94076762218493729104783735200107713211245710256802265203823917715299139499110],
|
||||
["tact-examples/jetton_JettonDefaultWallet.code.fc", 29421313492520031238091587108198906058157443241743283101866538036369069620563],
|
||||
["tact-examples/maps_MapTestContract.code.fc", 22556550222249123835909180266811414538971143565993192846012583552876721649744],
|
||||
]
|
||||
|
||||
def getenv(name, default=None):
|
||||
if name in os.environ:
|
||||
|
@ -49,8 +24,10 @@ def getenv(name, default=None):
|
|||
exit(1)
|
||||
return default
|
||||
|
||||
|
||||
FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func")
|
||||
FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift")
|
||||
FIFT_LIBS_FOLDER = getenv("FIFTPATH") # this env is needed for fift to work properly
|
||||
TMP_DIR = tempfile.mkdtemp()
|
||||
|
||||
COMPILED_FIF = os.path.join(TMP_DIR, "compiled.fif")
|
||||
|
@ -58,6 +35,17 @@ RUNNER_FIF = os.path.join(TMP_DIR, "runner.fif")
|
|||
|
||||
TESTS_DIR = "legacy_tests"
|
||||
|
||||
|
||||
def load_legacy_tests_list(jsonl_filename: str) -> list[tuple[str, int]]:
    """Parse legacy_tests.jsonl: every ["filename", hash] line becomes (filename, hash).

    The file may contain // comments and blank lines; only lines that start with
    a bracketed pair are picked up by the regexp.
    """
    with open(jsonl_filename) as fd:
        contents = fd.read()
    # Raw string: '\[' and '\s' are regex escapes, not (invalid) string escapes.
    results = re.findall(r'^\[\s*"(.*?)"\s*,\s*(.*?)\s*]', contents, re.MULTILINE)
    return [(filename, int(code_hash)) for filename, code_hash in results]
|
||||
|
||||
|
||||
tests = load_legacy_tests_list('legacy_tests.jsonl')
|
||||
|
||||
|
||||
class ExecutionError(Exception):
|
||||
pass
|
||||
|
||||
|
@ -119,12 +107,11 @@ def get_version():
|
|||
return s.strip()
|
||||
|
||||
success = 0
|
||||
for ti, t in enumerate(tests):
|
||||
tf, th = t
|
||||
print(" Running test %d/%d: %s" % (ti + 1, len(tests), tf), file=sys.stderr)
|
||||
tf = os.path.join(TESTS_DIR, tf)
|
||||
for ti, (filename_rel, code_hash) in enumerate(tests):
|
||||
print("Running test %d/%d: %s" % (ti + 1, len(tests), filename_rel), file=sys.stderr)
|
||||
try:
|
||||
compile_func(tf)
|
||||
filename = os.path.join(TESTS_DIR, filename_rel)
|
||||
compile_func(filename)
|
||||
except ExecutionError as e:
|
||||
print(file=sys.stderr)
|
||||
print("Compilation error", file=sys.stderr)
|
||||
|
@ -136,8 +123,8 @@ for ti, t in enumerate(tests):
|
|||
|
||||
try:
|
||||
func_out = run_runner()
|
||||
if func_out != th:
|
||||
raise ExecutionError("Error : expected '%d', found '%d'" % (th, func_out))
|
||||
if func_out != code_hash:
|
||||
raise ExecutionError("Error : expected '%d', found '%d'" % (code_hash, func_out))
|
||||
success += 1
|
||||
except ExecutionError as e:
|
||||
print(e, file=sys.stderr)
|
||||
|
@ -148,4 +135,4 @@ for ti, t in enumerate(tests):
|
|||
print(" OK ", file=sys.stderr)
|
||||
|
||||
print(get_version())
|
||||
print("Done: Success %d, Error: %d"%(success, len(tests)-success), file=sys.stderr)
|
||||
print("Done: Success %d, Error: %d"%(success, len(tests)-success), file=sys.stderr)
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
[["elector/elector-code.fc", "115226404411715505328583639896096915745686314074575650766750648324043316883483"], ["config/config-code.fc", "10913070768607625342121305745084703121685937915388357634624451844356456145601"], ["eth-bridge-multisig/multisig-code.fc", "101509909129354488841890823627011033360100627957439967918234053299675481277954"], ["bsc-bridge-collector/votes-collector.fc", "62190447221288642706570413295807615918589884489514159926097051017036969900417"], ["uni-lock-wallet/uni-lockup-wallet.fc", "61959738324779104851267145467044677651344601417998258530238254441977103654381"], ["nft-collection/nft-collection-editable.fc", "45561997735512210616567774035540357815786262097548276229169737015839077731274"], ["dns-collection/nft-collection.fc", "107999822699841936063083742021519765435859194241091312445235370766165379261859"], ["tele-nft-item/nft-item.fc", "69777543125381987786450436977742010705076866061362104025338034583422166453344"], ["storage/storage-contract.fc", "91377830060355733016937375216020277778264560226873154627574229667513068328151"], ["storage/storage-provider.fc", "13618336676213331164384407184540461509022654507176709588621016553953760588122"], ["nominator-pool/pool.fc", "69767057279163099864792356875696330339149706521019810113334238732928422055375"], ["jetton-minter/jetton-minter.fc", "9028309926287301331466371999814928201427184114165428257502393474125007156494"], ["gg-marketplace/nft-marketplace-v2.fc", "92199806964112524639740773542356508485601908152150843819273107618799016205930"], ["jetton-wallet/jetton-wallet.fc", "86251125787443633057458168028617933212663498001665054651523310772884328206542"], ["whales-nominators/nominators.fc", "8941364499854379927692172316865293429893094891593442801401542636695127885153"], ["tact-examples/treasure_Treasure.code.fc", "13962538639825790677138656603323869918938565499584297120566680287245364723897"], ["tact-examples/jetton_SampleJetton.code.fc", 
"94076762218493729104783735200107713211245710256802265203823917715299139499110"], ["tact-examples/jetton_JettonDefaultWallet.code.fc", "29421313492520031238091587108198906058157443241743283101866538036369069620563"], ["tact-examples/maps_MapTestContract.code.fc", "22556550222249123835909180266811414538971143565993192846012583552876721649744"]]
|
37
crypto/func/auto-tests/legacy_tests.jsonl
Normal file
37
crypto/func/auto-tests/legacy_tests.jsonl
Normal file
|
@ -0,0 +1,37 @@
|
|||
// This file is used by both legacy_tester.py and legacy_tester.js.
|
||||
// Its extension is .jsonl (not just .json) in order to use comments.
|
||||
// It contains a simple format ["filename_rel",bigint_hash]
|
||||
// and is parsed just using regexp ^\[\s*"(.*?)"\s*,\s*(.*?)\s*]
|
||||
// Some tests can be commented out, or they can be multiline, it works.
|
||||
|
||||
// note, that deployed version of elector,config and multisig differ since it is compiled with func-0.1.0.
|
||||
// Newer compilers optimize arithmetic and logic expression that can be calculated at the compile time
|
||||
["elector/elector-code.fc", 115226404411715505328583639896096915745686314074575650766750648324043316883483]
|
||||
["config/config-code.fc", 10913070768607625342121305745084703121685937915388357634624451844356456145601]
|
||||
["eth-bridge-multisig/multisig-code.fc", 101509909129354488841890823627011033360100627957439967918234053299675481277954]
|
||||
|
||||
["bsc-bridge-collector/votes-collector.fc", 62190447221288642706570413295807615918589884489514159926097051017036969900417]
|
||||
["uni-lock-wallet/uni-lockup-wallet.fc", 61959738324779104851267145467044677651344601417998258530238254441977103654381]
|
||||
["nft-collection/nft-collection-editable.fc", 45561997735512210616567774035540357815786262097548276229169737015839077731274]
|
||||
["dns-collection/nft-collection.fc", 107999822699841936063083742021519765435859194241091312445235370766165379261859]
|
||||
|
||||
|
||||
// note, that deployed version of tele-nft-item differs since it is compiled with func-0.3.0.
|
||||
// After introducing of try/catch construction, c2 register is not always the default one.
|
||||
// Thus it is necessary to save it upon jumps, differences of deployed and below compiled is that
|
||||
// "c2 SAVE" is added to the beginning of recv_internal. It does not change behavior.
|
||||
["tele-nft-item/nft-item.fc", 69777543125381987786450436977742010705076866061362104025338034583422166453344]
|
||||
|
||||
["storage/storage-contract.fc", 91377830060355733016937375216020277778264560226873154627574229667513068328151]
|
||||
["storage/storage-provider.fc", 13618336676213331164384407184540461509022654507176709588621016553953760588122]
|
||||
["nominator-pool/pool.fc", 69767057279163099864792356875696330339149706521019810113334238732928422055375]
|
||||
["jetton-minter/jetton-minter.fc", 9028309926287301331466371999814928201427184114165428257502393474125007156494]
|
||||
["gg-marketplace/nft-marketplace-v2.fc", 92199806964112524639740773542356508485601908152150843819273107618799016205930]
|
||||
["jetton-wallet/jetton-wallet.fc", 86251125787443633057458168028617933212663498001665054651523310772884328206542]
|
||||
["whales-nominators/nominators.fc", 8941364499854379927692172316865293429893094891593442801401542636695127885153]
|
||||
|
||||
|
||||
["tact-examples/treasure_Treasure.code.fc", 13962538639825790677138656603323869918938565499584297120566680287245364723897]
|
||||
["tact-examples/jetton_SampleJetton.code.fc", 94076762218493729104783735200107713211245710256802265203823917715299139499110]
|
||||
["tact-examples/jetton_JettonDefaultWallet.code.fc", 29421313492520031238091587108198906058157443241743283101866538036369069620563]
|
||||
["tact-examples/maps_MapTestContract.code.fc", 22556550222249123835909180266811414538971143565993192846012583552876721649744]
|
|
@ -1,77 +1,277 @@
|
|||
const fs = require('fs/promises');
|
||||
// Usage: `node run_tests.js tests_dir` OR `node run_tests.js test_file.fc`
|
||||
// from current dir, providing some env (see getenv() calls).
|
||||
// This is a JS version of run_tests.py to test FunC compiled to WASM.
|
||||
// Don't forget to keep it identical to Python version!
|
||||
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { compileWasm, compileFile } = require('./wasm_tests_common');
|
||||
const { execSync } = require('child_process');
|
||||
const child_process = require('child_process');
|
||||
|
||||
async function main() {
|
||||
const compiledPath = path.join(os.tmpdir(), 'compiled.fif');
|
||||
const runnerPath = path.join(os.tmpdir(), 'runner.fif');
|
||||
/** Mirrors Python's print(); keeps run_tests.js textually close to run_tests.py. */
function print(...args) {
    console.log.apply(console, args)
}
|
||||
|
||||
const tests = (await fs.readdir('.')).filter(f => f.endsWith('.fc')).sort();
|
||||
/**
 * Reads an environment variable; falls back to `def`, or aborts when the
 * variable is mandatory (no default given).
 * @return {string}
 */
function getenv(name, def = null) {
    const value = process.env[name]
    if (value !== undefined)
        return value
    if (def !== null)
        return def
    print(`Environment variable ${name} is not set`)
    process.exit(1)
}
|
||||
|
||||
const mathChars = '0x123456789()+-*/<>'.split('')
|
||||
const FUNCFIFTLIB_MODULE = getenv('FUNCFIFTLIB_MODULE')
|
||||
const FUNCFIFTLIB_WASM = getenv('FUNCFIFTLIB_WASM')
|
||||
const FIFT_EXECUTABLE = getenv('FIFT_EXECUTABLE')
|
||||
const FIFT_LIBS_FOLDER = getenv('FIFTPATH') // this env is needed for fift to work properly
|
||||
const TMP_DIR = os.tmpdir()
|
||||
|
||||
for (const testFile of tests) {
|
||||
const mod = await compileWasm()
|
||||
|
||||
const result = await compileFile(mod, testFile)
|
||||
|
||||
if (result.status !== 'ok') {
|
||||
console.error(result);
|
||||
throw new Error('Could not compile ' + filename);
|
||||
/**
 * Parses the command line: either a folder with tests or a single .fc file.
 * Exits with code 1 on misuse or a non-existing input path.
 */
class CmdLineOptions {
    constructor(/**string[]*/ argv) {
        if (argv.length !== 3) {
            print("Usage: node run_tests.js tests_dir OR node run_tests.js test_file.fc")
            process.exit(1)
        }
        if (!fs.existsSync(argv[2])) {
            print(`Input '${argv[2]}' doesn't exist`)
            process.exit(1)
        }

        const is_dir = fs.lstatSync(argv[2]).isDirectory()
        this.tests_dir = is_dir ? argv[2] : path.dirname(argv[2])
        this.test_file = is_dir ? null : argv[2]
    }

    /** @return {string[]} */
    find_tests() {
        if (this.test_file) // an option to run (debug) a single test
            return [this.test_file]

        const names = fs.readdirSync(this.tests_dir)
            .filter(f => f.endsWith('.fc') || f.endsWith(".func"))
            .sort()
        return names.map(f => path.join(this.tests_dir, f))
    }
}
|
||||
|
||||
main()
|
||||
|
||||
/** Raised when TESTCASE/@-directive lines in a .fc file are malformed. */
class ParseInputError extends Error {
}

/** Raised when func fails on a test that was expected to compile; keeps compiler stderr. */
class FuncCompilationFailedError extends Error {
    constructor(/**string*/ message, /**string*/ stderr) {
        super(message);
        this.stderr = stderr
    }
}

/** Raised when compilation succeeds although @compilation_should_fail was declared. */
class FuncCompilationSucceededError extends Error {
}

/** Raised when fift exits non-zero while executing the runner; keeps fift stderr. */
class FiftExecutionFailedError extends Error {
    constructor(/**string*/ message, /**string*/ stderr) {
        super(message);
        this.stderr = stderr
    }
}

/** Raised when actual output differs from the expected one; keeps the full output. */
class CompareOutputError extends Error {
    constructor(/**string*/ message, /**string*/ output) {
        super(message);
        this.output = output
    }
}
|
||||
|
||||
|
||||
/*
 * One row of the positive-test table: "input X should produce output Y".
 * Inputs may be plain numbers, slices (x{...}), or small math expressions
 * that are evaluated here before being handed to fift.
 */
class FuncTestCaseInputOutput {
    static reJustNumber = /^[-+]?\d+$/
    static reMathExpr = /^[0x123456789()+\-*/<>]*$/

    constructor(/**string*/ method_id_str, /**string*/ input_str, /**string*/ output_str) {
        const parsed = []
        for (const token of input_str.split(' ')) {
            if (token.length === 0)
                continue
            if (token.startsWith("x{") || FuncTestCaseInputOutput.reJustNumber.test(token)) {
                parsed.push(token)
                continue
            }
            if (!FuncTestCaseInputOutput.reMathExpr.test(token))
                throw new ParseInputError(`'${token}' can't be evaluated`)
            // replace "3<<254" with "3n<<254n" (big number) before eval (in Python we don't need this)
            parsed.push(eval(token.replace('//', '/').replace(/(\d)($|\D)/gmi, '$1n$2')).toString())
        }

        this.method_id = +method_id_str
        this.input = parsed.join(' ')
        this.expected_output = output_str
    }

    /** Compares one line of fift stdout against the expected output. */
    check(/**string[]*/ stdout_lines, /**number*/ line_idx) {
        if (stdout_lines[line_idx] !== this.expected_output)
            throw new CompareOutputError(`error on case ${line_idx + 1}: expected '${this.expected_output}', found '${stdout_lines[line_idx]}'`, stdout_lines.join("\n"))
    }
}
|
||||
|
||||
/*
 * @stderr directive: when compilation fails (as expected), asserts that the
 * compiler's stderr contains the given substring.
 */
class FuncTestCaseStderrIncludes {
    constructor(/**string*/ expected_substr) {
        this.expected_substr = expected_substr
    }

    check(/**string*/ stderr) {
        if (stderr.includes(this.expected_substr))
            return
        throw new CompareOutputError(`pattern '${this.expected_substr}' not found in stderr`, stderr)
    }
}
|
||||
|
||||
/**
 * One .fc test file: parses its TESTCASE / @compilation_should_fail / @stderr
 * directives, compiles it with the WASM FunC build, and (for positive tests)
 * executes the result with fift, comparing outputs case by case.
 */
class FuncTestFile {
    constructor(/**string*/ func_filename, /**string*/ artifacts_folder) {
        this.func_filename = func_filename
        this.artifacts_folder = artifacts_folder
        this.compilation_should_fail = false
        /** @type {FuncTestCaseStderrIncludes[]} */
        this.stderr_includes = []
        /** @type {FuncTestCaseInputOutput[]} */
        this.input_output = []
    }

    /** Scans the source file line by line, collecting directives. */
    parse_input_from_func_file() {
        const src_lines = fs.readFileSync(this.func_filename, 'utf-8').split(/\r?\n/)
        for (const line of src_lines) {
            if (line.startsWith('TESTCASE')) {
                const parts = line.split("|").map(p => p.trim())
                if (parts.length !== 4)
                    throw new ParseInputError(`incorrect format of TESTCASE: ${line}`)
                this.input_output.push(new FuncTestCaseInputOutput(parts[1], parts[2], parts[3]))
            } else if (line.startsWith('@compilation_should_fail')) {
                this.compilation_should_fail = true
            } else if (line.startsWith('@stderr')) {
                this.stderr_includes.push(new FuncTestCaseStderrIncludes(line.substring(7).trim()))
            }
        }

        // A test must be either positive (TESTCASEs) or negative (@compilation_should_fail), not both.
        if (this.input_output.length === 0 && !this.compilation_should_fail)
            throw new ParseInputError("no TESTCASE present")
        if (this.input_output.length !== 0 && this.compilation_should_fail)
            throw new ParseInputError("TESTCASE present, but compilation_should_fail")
    }

    get_compiled_fif_filename() {
        return this.artifacts_folder + "/compiled.fif"
    }

    get_runner_fif_filename() {
        return this.artifacts_folder + "/runner.fif"
    }

    async run_and_check() {
        // Phase 1: compile FunC → fift via the WASM build.
        const wasmModule = await compileWasm(FUNCFIFTLIB_MODULE, FUNCFIFTLIB_WASM)
        let res = compileFile(wasmModule, this.func_filename)
        let exit_code = res.status === 'ok' ? 0 : 1
        let stderr = res.message
        let stdout = ''

        if (exit_code === 0 && this.compilation_should_fail)
            throw new FuncCompilationSucceededError("compilation succeeded, but it should have failed")

        if (exit_code !== 0 && this.compilation_should_fail) {
            // Negative test: compilation failed as expected; just verify stderr patterns.
            for (const matcher of this.stderr_includes)
                matcher.check(stderr)
            return
        }

        if (exit_code !== 0 && !this.compilation_should_fail)
            throw new FuncCompilationFailedError(`func exit_code = ${exit_code}`, stderr)

        // Phase 2: write compiled.fif plus a runner that invokes each testcase.
        fs.writeFileSync(this.get_compiled_fif_filename(), `"Asm.fif" include\n${res.fiftCode}`)
        const runner_lines = [`"${this.get_compiled_fif_filename()}" include <s constant code`]
        for (const tc of this.input_output)
            runner_lines.push(`${tc.input} ${tc.method_id} code 1 runvmx abort"exitcode is not 0" .s cr { drop } depth 1- times`)
        fs.writeFileSync(this.get_runner_fif_filename(), runner_lines.join('\n') + '\n')

        // Phase 3: execute with fift and compare each non-empty stdout line.
        res = child_process.spawnSync(FIFT_EXECUTABLE, [this.get_runner_fif_filename()])
        exit_code = res.status
        stderr = (res.stderr || res.error).toString()
        stdout = (res.stdout || '').toString()
        const stdout_lines = stdout.split("\n").map(x => x.trim()).filter(s => s.length > 0)

        if (exit_code)
            throw new FiftExecutionFailedError(`fift exit_code = ${exit_code}`, stderr)

        if (stdout_lines.length !== this.input_output.length)
            throw new CompareOutputError(`unexpected number of fift output: ${stdout_lines.length} lines, but ${this.input_output.length} testcases`, stdout)

        for (let i = 0; i < stdout_lines.length; ++i)
            this.input_output[i].check(stdout_lines, i)
    }
}
|
||||
|
||||
/**
 * Runs each test file sequentially; on the first failure prints diagnostics
 * and exits with code 2 (an unrecognized exception is rethrown instead).
 */
async function run_all_tests(/**string[]*/ tests) {
    for (const [ti, func_filename] of tests.entries()) {
        print(`Running test ${ti + 1}/${tests.length}: ${func_filename}`)

        const artifacts_folder = path.join(TMP_DIR, func_filename)
        const testcase = new FuncTestFile(func_filename, artifacts_folder)

        try {
            if (!fs.existsSync(artifacts_folder))
                fs.mkdirSync(artifacts_folder, {recursive: true})
            testcase.parse_input_from_func_file()
            await testcase.run_and_check()
            // On success the artifacts are removed; on failure they stay for inspection.
            fs.rmSync(artifacts_folder, {recursive: true})

            if (testcase.compilation_should_fail)
                print(" OK, compilation failed as it should")
            else
                print(` OK, ${testcase.input_output.length} cases`)
        } catch (e) {
            if (e instanceof ParseInputError) {
                print(" Error parsing input:", e.message)
                process.exit(2)
            } else if (e instanceof FuncCompilationFailedError) {
                print(" Error compiling func:", e.message)
                print(" stderr:")
                print(e.stderr.trimEnd())
                process.exit(2)
            } else if (e instanceof FiftExecutionFailedError) {
                print(" Error executing fift:", e.message)
                print(" stderr:")
                print(e.stderr.trimEnd())
                print(" compiled.fif at:", testcase.get_compiled_fif_filename())
                process.exit(2)
            } else if (e instanceof CompareOutputError) {
                print(" Mismatch in output:", e.message)
                print(" Full output:")
                print(e.output.trimEnd())
                print(" Was compiled to:", testcase.get_compiled_fif_filename())
                process.exit(2)
            }
            // Unknown failure: let the caller's rejection handler report it.
            throw e
        }
    }
}
|
||||
|
||||
// Entry point: collect the test list from argv, then run everything sequentially.
const tests = new CmdLineOptions(process.argv).find_tests()
print(`Found ${tests.length} tests`)
run_all_tests(tests).then(
    () => print(`Done, ${tests.length} tests`),
    console.error,
)
|
||||
|
|
|
@ -1,5 +1,18 @@
|
|||
# Usage: `run_tests.py tests_dir` OR `run_tests.py test_file.fc`
|
||||
# from current dir, providing some env (see getenv() calls).
|
||||
# Every .fc file should provide {- testcase description in a comment -}, consider tests/ folder.
|
||||
#
|
||||
# Tests for FunC can be
|
||||
# * positive (compiled to .fif, run with fift, compared output with the one expected)
|
||||
# * negative (compilation fails, and it's expected; patterns in stderr can be specified)
|
||||
#
|
||||
# Note, that there is also run_tests.js to test FunC compiled to WASM.
|
||||
# Don't forget to keep it identical to Python version!
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
@ -16,95 +29,224 @@ def getenv(name, default=None):
|
|||
|
||||
FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func")
|
||||
FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift")
|
||||
FIFT_LIBS_FOLDER = getenv("FIFTPATH") # this env is needed for fift to work properly
|
||||
TMP_DIR = tempfile.mkdtemp()
|
||||
COMPILED_FIF = os.path.join(TMP_DIR, "compiled.fif")
|
||||
RUNNER_FIF = os.path.join(TMP_DIR, "runner.fif")
|
||||
|
||||
if len(sys.argv) != 2:
|
||||
print("Usage : run_tests.py tests_dir", file=sys.stderr)
|
||||
exit(1)
|
||||
TESTS_DIR = sys.argv[1]
|
||||
|
||||
|
||||
class ExecutionError(Exception):
|
||||
class CmdLineOptions:
    """Parses the command line: either a folder with tests or a single .fc file.

    Exits with code 1 on misuse or a non-existing input path.
    """

    def __init__(self, argv: list[str]):
        if len(argv) != 2:
            print("Usage: run_tests.py tests_dir OR run_tests.py test_file.fc", file=sys.stderr)
            exit(1)
        if not os.path.exists(argv[1]):
            print("Input '%s' doesn't exist" % argv[1], file=sys.stderr)
            exit(1)

        is_dir = os.path.isdir(argv[1])
        # For a single file, tests_dir is its containing folder.
        self.tests_dir = argv[1] if is_dir else os.path.dirname(argv[1])
        self.test_file = None if is_dir else argv[1]

    def find_tests(self) -> list[str]:
        if self.test_file is not None:  # an option to run (debug) a single test
            return [self.test_file]

        names = sorted(f for f in os.listdir(self.tests_dir) if f.endswith((".fc", ".func")))
        return [os.path.join(self.tests_dir, f) for f in names]
|
||||
|
||||
|
||||
class ParseInputError(Exception):
    """Raised when TESTCASE/@-directive lines in a .fc file are malformed."""
|
||||
|
||||
|
||||
def compile_func(f):
|
||||
res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_FIF, "-SPA", f], capture_output=True, timeout=10)
|
||||
if res.returncode != 0:
|
||||
raise ExecutionError(str(res.stderr, "utf-8"))
|
||||
class FuncCompilationFailedError(Exception):
    """Raised when func exits non-zero on a test that should compile."""

    def __init__(self, message: str, stderr: str):
        super().__init__(message)
        self.stderr = stderr  # compiler output, shown to the user on failure
|
||||
|
||||
|
||||
def run_runner():
|
||||
res = subprocess.run([FIFT_EXECUTABLE, RUNNER_FIF], capture_output=True, timeout=10)
|
||||
if res.returncode != 0:
|
||||
raise ExecutionError(str(res.stderr, "utf-8"))
|
||||
s = str(res.stdout, "utf-8")
|
||||
s = [x.strip() for x in s.split("\n")]
|
||||
return [x for x in s if x != ""]
|
||||
class FuncCompilationSucceededError(Exception):
    """Raised when compilation unexpectedly succeeds for a @compilation_should_fail test."""
    pass
|
||||
|
||||
|
||||
tests = [s for s in os.listdir(TESTS_DIR) if s.endswith(".fc")]
|
||||
tests.sort()
|
||||
print("Found", len(tests), "tests", file=sys.stderr)
|
||||
for ti, tf in enumerate(tests):
|
||||
print("Running test %d/%d: %s" % (ti + 1, len(tests), tf), file=sys.stderr)
|
||||
tf = os.path.join(TESTS_DIR, tf)
|
||||
try:
|
||||
compile_func(tf)
|
||||
except ExecutionError as e:
|
||||
print(file=sys.stderr)
|
||||
print("Compilation error", file=sys.stderr)
|
||||
print(e, file=sys.stderr)
|
||||
exit(2)
|
||||
with open(tf, "r") as fd:
|
||||
lines = fd.readlines()
|
||||
cases = []
|
||||
for s in lines:
|
||||
s = [x.strip() for x in s.split("|")]
|
||||
if len(s) == 4 and s[0].strip() == "TESTCASE":
|
||||
cases.append(s[1:])
|
||||
if len(cases) == 0:
|
||||
print(file=sys.stderr)
|
||||
print("Error: no test cases", file=sys.stderr)
|
||||
exit(2)
|
||||
class FiftExecutionFailedError(Exception):
    """Fift interpreter exited with a non-zero code; `stderr` keeps its error output."""

    def __init__(self, message: str, stderr: str):
        self.stderr = stderr
        super().__init__(message)
|
||||
|
||||
# preprocess arithmetics in input
|
||||
for i in range(len(cases)):
|
||||
inputs = cases[i][1].split(" ")
|
||||
processed_inputs = ""
|
||||
for in_arg in inputs:
|
||||
if "x{" in in_arg:
|
||||
processed_inputs += in_arg
|
||||
|
||||
class CompareOutputError(Exception):
    """Actual output doesn't match the expected one; `output` keeps the full actual output."""

    def __init__(self, message: str, output: str):
        self.output = output
        super().__init__(message)
|
||||
|
||||
|
||||
class FuncTestCaseInputOutput:
    """
    In positive tests, there are several testcases "input X should produce output Y".
    They are written as a table:
    TESTCASE | method_id | input (one or several) | output
    """
    # a plain (possibly signed) integer literal
    reJustNumber = re.compile(r"[-+]?\d+")
    # a simple arithmetic expression over integer literals, e.g. "1<<255" or "3*(-4)";
    # raw strings here fix the invalid escape sequences \d and \- in plain string literals
    reMathExpr = re.compile(r"[0x123456789()+\-*/<>]+")

    def __init__(self, method_id_str: str, input_str: str, output_str: str):
        processed_inputs = []
        for in_arg in input_str.split(" "):
            if len(in_arg) == 0:
                continue
            elif in_arg.startswith("x{") or FuncTestCaseInputOutput.reJustNumber.fullmatch(in_arg):
                # slices (x{...}) and plain numbers are passed through unchanged
                processed_inputs.append(in_arg)
            elif FuncTestCaseInputOutput.reMathExpr.fullmatch(in_arg):
                # eval() is acceptable here: the whole token is restricted to the math-expr charset
                processed_inputs.append(str(eval(in_arg)))
            else:
                raise ParseInputError("'%s' can't be evaluated" % in_arg)

        self.method_id = int(method_id_str)
        self.input = " ".join(processed_inputs)
        self.expected_output = output_str

    def check(self, stdout_lines: list[str], line_idx: int):
        # compares the fift output line of this testcase against the expected output
        if stdout_lines[line_idx] != self.expected_output:
            raise CompareOutputError("error on case %d: expected '%s', found '%s'" % (line_idx + 1, self.expected_output, stdout_lines[line_idx]), "\n".join(stdout_lines))
|
||||
|
||||
|
||||
class FuncTestCaseStderrIncludes:
    """
    @stderr checks, when compilation fails, that stderr (compilation error) is expected.
    """

    def __init__(self, expected_substr: str):
        self.expected_substr = expected_substr

    def check(self, stderr: str):
        """Raises CompareOutputError unless the expected substring occurs in stderr."""
        found = self.expected_substr in stderr
        if not found:
            raise CompareOutputError("pattern '%s' not found in stderr" % self.expected_substr, stderr)
|
||||
|
||||
|
||||
class FuncTestFile:
    """A single .fc test: its parsed TESTCASE table / @-directives plus the logic to compile and run it."""

    def __init__(self, func_filename: str, artifacts_folder: str):
        self.func_filename = func_filename
        self.artifacts_folder = artifacts_folder
        self.compilation_should_fail = False
        self.stderr_includes: list[FuncTestCaseStderrIncludes] = []
        self.input_output: list[FuncTestCaseInputOutput] = []

    def parse_input_from_func_file(self):
        """Scans the .fc source for TESTCASE lines and @-directives.

        Raises ParseInputError on a malformed TESTCASE or an inconsistent combination
        (no testcases at all, or testcases together with @compilation_should_fail).
        """
        with open(self.func_filename, "r") as fd:
            lines = fd.read().splitlines()
        i = 0
        while i < len(lines):
            line = lines[i]
            if line.startswith("TESTCASE"):
                s = [x.strip() for x in line.split("|")]
                if len(s) != 4:
                    raise ParseInputError("incorrect format of TESTCASE: %s" % line)
                self.input_output.append(FuncTestCaseInputOutput(s[1], s[2], s[3]))
            elif line.startswith("@compilation_should_fail"):
                self.compilation_should_fail = True
            elif line.startswith("@stderr"):
                self.stderr_includes.append(FuncTestCaseStderrIncludes(line[7:].strip()))
            i = i + 1

        if len(self.input_output) == 0 and not self.compilation_should_fail:
            raise ParseInputError("no TESTCASE present")
        if len(self.input_output) != 0 and self.compilation_should_fail:
            raise ParseInputError("TESTCASE present, but compilation_should_fail")

    def get_compiled_fif_filename(self):
        return self.artifacts_folder + "/compiled.fif"

    # note: was accidentally a @property, inconsistent with get_compiled_fif_filename();
    # both "get_*" accessors are now plain methods
    def get_runner_fif_filename(self):
        return self.artifacts_folder + "/runner.fif"

    def run_and_check(self):
        """Compiles the test; for negative tests checks stderr patterns,
        for positive tests generates a Fift runner, executes it and compares outputs.

        Raises FuncCompilationFailedError / FuncCompilationSucceededError /
        FiftExecutionFailedError / CompareOutputError on the corresponding failure.
        """
        res = subprocess.run([FUNC_EXECUTABLE, "-o", self.get_compiled_fif_filename(), "-SPA", self.func_filename], capture_output=True, timeout=10)
        exit_code = res.returncode
        stderr = str(res.stderr, "utf-8")
        stdout = str(res.stdout, "utf-8")

        if exit_code == 0 and self.compilation_should_fail:
            raise FuncCompilationSucceededError("compilation succeeded, but it should have failed")

        if exit_code != 0 and self.compilation_should_fail:
            # negative test: the failure itself is expected, just verify stderr patterns
            for should_include in self.stderr_includes:
                should_include.check(stderr)
            return

        if exit_code != 0 and not self.compilation_should_fail:
            raise FuncCompilationFailedError("func exit_code = %d" % exit_code, stderr)

        # generate a Fift script that invokes every testcase and prints the VM stack
        with open(self.get_runner_fif_filename(), "w") as f:
            f.write("\"%s\" include <s constant code\n" % self.get_compiled_fif_filename())
            for t in self.input_output:
                f.write("%s %d code 1 runvmx abort\"exitcode is not 0\" .s cr { drop } depth 1- times\n" % (t.input, t.method_id))

        res = subprocess.run([FIFT_EXECUTABLE, self.get_runner_fif_filename()], capture_output=True, timeout=10)
        exit_code = res.returncode
        stderr = str(res.stderr, "utf-8")
        stdout = str(res.stdout, "utf-8")
        stdout_lines = [x.strip() for x in stdout.split("\n")]
        stdout_lines = [x for x in stdout_lines if x != ""]

        if exit_code != 0:
            raise FiftExecutionFailedError("fift exit_code = %d" % exit_code, stderr)

        if len(stdout_lines) != len(self.input_output):
            raise CompareOutputError("unexpected number of fift output: %d lines, but %d testcases" % (len(stdout_lines), len(self.input_output)), stdout)

        for i in range(len(stdout_lines)):
            self.input_output[i].check(stdout_lines, i)
|
||||
|
||||
|
||||
def run_all_tests(tests: list[str]):
    """Runs every test file in order, printing progress to stderr.

    Exits the process with code 2 on the first failure (parse error, unexpected
    compilation result, fift error, or output mismatch).
    """
    for ti, func_filename in enumerate(tests):
        print("Running test %d/%d: %s" % (ti + 1, len(tests), func_filename), file=sys.stderr)

        artifacts_folder = os.path.join(TMP_DIR, func_filename)
        testcase = FuncTestFile(func_filename, artifacts_folder)
        try:
            os.makedirs(artifacts_folder, exist_ok=True)
            testcase.parse_input_from_func_file()
            testcase.run_and_check()
            # on success, artifacts are no longer needed
            shutil.rmtree(artifacts_folder)

            if testcase.compilation_should_fail:
                print("  OK, compilation failed as it should", file=sys.stderr)
            else:
                print("  OK, %d cases" % len(testcase.input_output), file=sys.stderr)
        except ParseInputError as e:
            print("  Error parsing input:", e, file=sys.stderr)
            exit(2)
        except FuncCompilationFailedError as e:
            print("  Error compiling func:", e, file=sys.stderr)
            print("  stderr:", file=sys.stderr)
            print(e.stderr.rstrip(), file=sys.stderr)
            exit(2)
        except FuncCompilationSucceededError as e:
            print("  Error:", e, file=sys.stderr)
            exit(2)
        except FiftExecutionFailedError as e:
            print("  Error executing fift:", e, file=sys.stderr)
            print("  stderr:", file=sys.stderr)
            print(e.stderr.rstrip(), file=sys.stderr)
            print("  compiled.fif at:", testcase.get_compiled_fif_filename(), file=sys.stderr)
            exit(2)
        except CompareOutputError as e:
            print("  Mismatch in output:", e, file=sys.stderr)
            print("  Full output:", file=sys.stderr)
            print(e.output.rstrip(), file=sys.stderr)
            print("  Was compiled to:", testcase.get_compiled_fif_filename(), file=sys.stderr)
            exit(2)
|
||||
|
||||
|
||||
# entrypoint: collect tests (a whole folder or a single file) and run them all
cmd_options = CmdLineOptions(sys.argv)
tests = cmd_options.find_tests()
print("Found", len(tests), "tests", file=sys.stderr)
run_all_tests(tests)
print("Done, %d tests" % len(tests), file=sys.stderr)
|
||||
|
|
8
crypto/func/auto-tests/tests/invalid.fc
Normal file
8
crypto/func/auto-tests/tests/invalid.fc
Normal file
|
@ -0,0 +1,8 @@
|
|||
_ main(s) {
|
||||
var (z, t) = ;
|
||||
|
||||
{-
|
||||
@compilation_should_fail
|
||||
@stderr identifier expected instead of `;`
|
||||
@stderr var (z, t) = ;
|
||||
-}
|
|
@ -217,7 +217,7 @@ TESTCASE | 10000 | -1-(-1<<256) -2-(-1<<256) | 1157920892373161954235709850086
|
|||
TESTCASE | 10000 | -1-(-1<<256) 1<<255 | 81877371507464127617551201542979628307507432471243237061821853600756754782485
|
||||
TESTCASE | 10000 | 1 2 | 1
|
||||
TESTCASE | 10000 | 1 3 | 2
|
||||
TESTCASE | 10000 | 3<<254, 1<<254 | 50139445418395255283694704271811692336355250894665672355503583528635147053497
|
||||
TESTCASE | 10000 | 3<<254 1<<254 | 50139445418395255283694704271811692336355250894665672355503583528635147053497
|
||||
TESTCASE | 10000 | 3 5 | 4
|
||||
TESTCASE | 10001 | 115641670674223639132965820642403718536242645001775371762318060545014644837101-1 | 115792089237316195423570985008687907853269984665640564039457584007913129639935
|
||||
TESTCASE | 10001 | 15<<252 | 108679485937549714997960660780289583146059954551846264494610741505469565211201
|
||||
|
|
|
@ -17,7 +17,8 @@ const copyFromCString = (mod, ptr) => {
|
|||
return mod.UTF8ToString(ptr);
|
||||
};
|
||||
|
||||
async function compileFile(mod, filename) {
|
||||
/** @return {{status: string, message: string, fiftCode: string, codeBoc: string, codeHashHex: string}} */
|
||||
function compileFile(mod, filename) {
|
||||
const callbackPtr = mod.addFunction((_kind, _data, contents, error) => {
|
||||
const kind = copyFromCString(mod, _kind);
|
||||
const data = copyFromCString(mod, _data);
|
||||
|
@ -28,7 +29,7 @@ async function compileFile(mod, filename) {
|
|||
try {
|
||||
copyToCStringPtr(mod, fsSync.readFileSync(path).toString('utf-8'), contents);
|
||||
} catch (err) {
|
||||
copyToCStringPtr(mod, e.message, error);
|
||||
copyToCStringPtr(mod, err.message, error);
|
||||
}
|
||||
} else {
|
||||
copyToCStringPtr(mod, 'Unknown callback kind ' + kind, error);
|
||||
|
@ -47,14 +48,11 @@ async function compileFile(mod, filename) {
|
|||
return JSON.parse(copyFromCString(mod, responsePtr));
|
||||
}
|
||||
|
||||
const wasmModule = require(process.env.FUNCFIFTLIB_MODULE)
|
||||
/**
 * Instantiates the FunC/Fift WASM module from the given emscripten JS glue file and .wasm binary.
 * @param {string} fiftFuncLibJsFileName path to the emscripten-generated JS module
 * @param {string} fiftFuncLibWasmFileName path to the .wasm binary
 * @return {Promise<object>} the initialized emscripten module
 */
async function compileWasm(fiftFuncLibJsFileName, fiftFuncLibWasmFileName) {
    const moduleFactory = require(fiftFuncLibJsFileName)
    const wasmBinary = new Uint8Array(fsSync.readFileSync(fiftFuncLibWasmFileName))
    return await moduleFactory({ wasmBinary })
}
|
||||
|
||||
module.exports = {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue