Add curl and openssl repos

This commit is contained in:
2025-08-14 12:09:30 -04:00
parent af2117b574
commit 0ace93e303
21174 changed files with 3607720 additions and 2 deletions

View File

@@ -0,0 +1,280 @@
#! /usr/bin/env perl
# Copyright 2021-2025 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# This module implements support for Armv8 SM3 instructions
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
# Locate the arm-xlate.pl translator: first next to this script, then in
# the shared perlasm directory two levels up.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
# Pipe everything we print through the translator so that STDOUT below
# ends up as flavour-specific assembly in $output.
open OUT,"| \"$^X\" $xlate $flavour \"$output\""
or die "can't call $xlate: $!";
*STDOUT=*OUT;
# Message expanding:
# Wj <- P1(W[j-16]^W[j-9]^(W[j-3]<<<15))^(W[j-13]<<<7)^W[j-6]
# Input: s0, s1, s2, s3
# s0 = w0 | w1 | w2 | w3
# s1 = w4 | w5 | w6 | w7
# s2 = w8 | w9 | w10 | w11
# s3 = w12 | w13 | w14 | w15
# Output: s4
# Emit the message-expansion step for four new words:
#   Wj <- P1(W[j-16]^W[j-9]^(W[j-3]<<<15))^(W[j-13]<<<7)^W[j-6]
#
# Arguments (vector register names, interpolated into the template):
#   $s0..$s3       - sixteen input words w0..w15, four per register
#   $s4            - output register, receives the next four words
#   $vtmp1, $vtmp2 - scratch registers
#
# Appends the generated assembly to the global $code buffer.
# NOTE: the misleading empty "()" prototype was removed — the sub takes
# seven arguments; all call sites use "&msg_exp", which bypassed it anyway.
sub msg_exp {
    my ($s0, $s1, $s2, $s3, $s4, $vtmp1, $vtmp2) = @_;
$code.=<<___;
// s4 = w7 | w8 | w9 | w10
ext $s4.16b, $s1.16b, $s2.16b, #12
// vtmp1 = w3 | w4 | w5 | w6
ext $vtmp1.16b, $s0.16b, $s1.16b, #12
// vtmp2 = w10 | w11 | w12 | w13
ext $vtmp2.16b, $s2.16b, $s3.16b, #8
sm3partw1 $s4.4s, $s0.4s, $s3.4s
sm3partw2 $s4.4s, $vtmp2.4s, $vtmp1.4s
___
}
# A round of compresson function
# Input:
# ab - choose instruction among sm3tt1a, sm3tt1b, sm3tt2a, sm3tt2b
# vstate0 - vstate1, store digest status(A - H)
# vconst0 - vconst1, interleaved used to store Tj <<< j
# vtmp - temporary register
# vw - for sm3tt1ab, vw = s0 eor s1
# s0 - for sm3tt2ab, just be s0
# i, choose wj' or wj from vw
# Emit one lane of a compression quad-round.
#
# Arguments:
#   $ab                - "a" or "b": selects sm3tt1a/sm3tt2a vs sm3tt1b/sm3tt2b
#   $vstate0, $vstate1 - digest state registers (A-H)
#   $vconst0, $vconst1 - Tj<<<j constant registers, used/updated alternately:
#                        $vconst1 receives $vconst0 rotated left by 1 (shl+sri)
#   $vtmp              - scratch register
#   $vw                - for sm3tt1[ab]: s0 eor s1
#   $s0                - for sm3tt2[ab]: just s0
#   $i                 - lane index 0-3 selecting wj'/wj from $vw/$s0
#
# Appends the generated assembly to the global $code buffer.
# NOTE: the misleading empty "()" prototype was removed — the sub takes
# nine arguments; all call sites use "&round", which bypassed it anyway.
sub round {
    my ($ab, $vstate0, $vstate1, $vconst0, $vconst1,
        $vtmp, $vw, $s0, $i) = @_;
$code.=<<___;
sm3ss1 $vtmp.4s, $vstate0.4s, $vconst0.4s, $vstate1.4s
shl $vconst1.4s, $vconst0.4s, #1
sri $vconst1.4s, $vconst0.4s, #31
sm3tt1$ab $vstate0.4s, $vtmp.4s, $vw.4s[$i]
sm3tt2$ab $vstate1.4s, $vtmp.4s, $s0.4s[$i]
___
}
# Emit one quad-round: optional message expansion followed by four
# compression rounds that consume lanes 0-3 of the current words.
#
# Arguments:
#   $ab                - "a"/"b", passed through to &round
#   $vstate0, $vstate1 - digest state registers
#   $vconst0, $vconst1 - Tj constant registers; alternated between the four
#                        rounds so each round picks up the rotated constant
#                        produced by the previous one
#   $vtmp1, $vtmp2     - scratch registers ($vtmp1 also holds s0 eor s1)
#   $s0..$s4           - message-word registers; $s4 is optional
#
# Appends to the global $code buffer via &msg_exp / &round.
# NOTE: the misleading empty "()" prototype was removed — the sub takes
# up to twelve arguments; all call sites use "&qround", bypassing it anyway.
sub qround {
    my ($ab, $vstate0, $vstate1, $vconst0, $vconst1,
        $vtmp1, $vtmp2, $s0, $s1, $s2, $s3, $s4) = @_;
    # Expand the next four message words only when an output register was
    # supplied; the final quad-rounds are called without $s4.
    if($s4) {
        &msg_exp($s0, $s1, $s2, $s3, $s4, $vtmp1, $vtmp2);
    }
$code.=<<___;
eor $vtmp1.16b, $s0.16b, $s1.16b
___
    &round($ab, $vstate0, $vstate1, $vconst0, $vconst1, $vtmp2,
           $vtmp1, $s0, 0);
    &round($ab, $vstate0, $vstate1, $vconst1, $vconst0, $vtmp2,
           $vtmp1, $s0, 1);
    &round($ab, $vstate0, $vstate1, $vconst0, $vconst1, $vtmp2,
           $vtmp1, $s0, 2);
    &round($ab, $vstate0, $vstate1, $vconst1, $vconst0, $vtmp2,
           $vtmp1, $s0, 3);
}
$code=<<___;
#include "arm_arch.h"
.text
___
{{{
# Register assignments for the routine below (string names only; they are
# interpolated into the assembly template).
my ($pstate,$pdata,$num)=("x0","x1","w2");
my ($state1,$state2)=("v5","v6");
my ($sconst1, $sconst2)=("s16","s17");
my ($vconst1, $vconst2)=("v16","v17");
my ($s0,$s1,$s2,$s3,$s4)=map("v$_",(0..4));
my ($bkstate1,$bkstate2)=("v18","v19");
my ($vconst_tmp1,$vconst_tmp2)=("v20","v21");
my ($vtmp1,$vtmp2)=("v22","v23");
my $constaddr="x8";
# void ossl_hwsm3_block_data_order(SM3_CTX *c, const void *p, size_t num)
# Prologue: load the digest state, reorder its words (rev64 + ext #8),
# and load the two Tj round constants from .Tj (defined at the end).
$code.=<<___;
.globl ossl_hwsm3_block_data_order
.type ossl_hwsm3_block_data_order,%function
.align 5
ossl_hwsm3_block_data_order:
AARCH64_VALID_CALL_TARGET
// load state
ld1 {$state1.4s-$state2.4s}, [$pstate]
rev64 $state1.4s, $state1.4s
rev64 $state2.4s, $state2.4s
ext $state1.16b, $state1.16b, $state1.16b, #8
ext $state2.16b, $state2.16b, $state2.16b, #8
adr $constaddr, .Tj
ldp $sconst1, $sconst2, [$constaddr]
.Loop:
// load input
ld1 {$s0.4s-$s3.4s}, [$pdata], #64
sub $num, $num, #1
mov $bkstate1.16b, $state1.16b
mov $bkstate2.16b, $state2.16b
#ifndef __AARCH64EB__
rev32 $s0.16b, $s0.16b
rev32 $s1.16b, $s1.16b
rev32 $s2.16b, $s2.16b
rev32 $s3.16b, $s3.16b
#endif
ext $vconst_tmp1.16b, $vconst1.16b, $vconst1.16b, #4
___
# First 16 rounds: "a"-variant instructions with the first Tj constant
# (0x79cc4519, loaded from .Tj above).  Message registers rotate
# s0..s4 -> s1..s0 etc. between quad-rounds.
&qround("a",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s0,$s1,$s2,$s3,$s4);
&qround("a",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s1,$s2,$s3,$s4,$s0);
&qround("a",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s2,$s3,$s4,$s0,$s1);
&qround("a",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s3,$s4,$s0,$s1,$s2);
# Switch to the second Tj constant (0x9d8a7a87) for rounds 16-63.
$code.=<<___;
ext $vconst_tmp1.16b, $vconst2.16b, $vconst2.16b, #4
___
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s4,$s0,$s1,$s2,$s3);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s0,$s1,$s2,$s3,$s4);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s1,$s2,$s3,$s4,$s0);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s2,$s3,$s4,$s0,$s1);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s3,$s4,$s0,$s1,$s2);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s4,$s0,$s1,$s2,$s3);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s0,$s1,$s2,$s3,$s4);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s1,$s2,$s3,$s4,$s0);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s2,$s3,$s4,$s0,$s1);
# Last three quad-rounds pass no $s4: all message words already exist,
# so &qround skips the expansion step.
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s3,$s4);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s4,$s0);
&qround("b",$state1,$state2,$vconst_tmp1,$vconst_tmp2,$vtmp1,$vtmp2,
$s0,$s1);
# Epilogue: fold in the saved state, loop over remaining blocks, then
# undo the word reordering and store the digest back.
$code.=<<___;
eor $state1.16b, $state1.16b, $bkstate1.16b
eor $state2.16b, $state2.16b, $bkstate2.16b
// any remained blocks?
cbnz $num, .Loop
// save state
rev64 $state1.4s, $state1.4s
rev64 $state2.4s, $state2.4s
ext $state1.16b, $state1.16b, $state1.16b, #8
ext $state2.16b, $state2.16b, $state2.16b, #8
st1 {$state1.4s-$state2.4s}, [$pstate]
ret
.size ossl_hwsm3_block_data_order,.-ossl_hwsm3_block_data_order
.align 3
.Tj:
.word 0x79cc4519, 0x9d8a7a87
___
}}}
#########################################
# Base instruction encodings for the SM3 extension mnemonics.  They are
# OR-ed with the register fields in the unsm3* subs below so the output
# assembles even with toolchains that do not know these mnemonics.
my %sm3partopcode = (
"sm3partw1" => 0xce60C000,
"sm3partw2" => 0xce60C400);
my %sm3ss1opcode = (
"sm3ss1" => 0xce400000);
my %sm3ttopcode = (
"sm3tt1a" => 0xce408000,
"sm3tt1b" => 0xce408400,
"sm3tt2a" => 0xce408800,
"sm3tt2b" => 0xce408C00);
# Translate an sm3partw1/sm3partw2 mnemonic plus operand string into a raw
# ".inst" word, keeping the original text as a trailing comment.
# Returns false (empty) when the operands do not parse.
sub unsm3part {
    my ($mnemonic, $arg) = @_;
    # Extract the Vd, Vn, Vm register numbers from the operand list.
    return '' unless $arg =~ m/[qv](\d+)[^,]*,\s*[qv](\d+)[^,]*,\s*[qv](\d+)/;
    my ($vd, $vn, $vm) = ($1, $2, $3);
    my $insn = $sm3partopcode{$mnemonic} | $vd | ($vn << 5) | ($vm << 16);
    return sprintf ".inst\t0x%08x\t//%s %s", $insn, $mnemonic, $arg;
}
# Translate an sm3ss1 mnemonic plus operand string into a raw ".inst"
# word, keeping the original text as a trailing comment.
# Returns false (empty) when the operands do not parse.
sub unsm3ss1 {
    my ($mnemonic, $arg) = @_;
    # Extract the Vd, Vn, Vm, Va register numbers from the operand list.
    return ''
        unless $arg =~ m/[qv](\d+)[^,]*,\s*[qv](\d+)[^,]*,\s*[qv](\d+)[^,]*,\s*[qv](\d+)/;
    my ($vd, $vn, $vm, $va) = ($1, $2, $3, $4);
    my $insn = $sm3ss1opcode{$mnemonic} | $vd | ($vn << 5) | ($vm << 16) | ($va << 10);
    return sprintf ".inst\t0x%08x\t//%s %s", $insn, $mnemonic, $arg;
}
# Translate an sm3tt1a/sm3tt1b/sm3tt2a/sm3tt2b mnemonic plus operand
# string (including the trailing [0-3] lane index) into a raw ".inst"
# word, keeping the original text as a trailing comment.
# Returns false (empty) when the operands do not parse.
sub unsm3tt {
    my ($mnemonic, $arg) = @_;
    # Extract Vd, Vn, Vm and the immediate lane index.
    return ''
        unless $arg =~ m/[qv](\d+)[^,]*,\s*[qv](\d+)[^,]*,\s*[qv](\d+)[^,]*\[([0-3])\]/;
    my ($vd, $vn, $vm, $imm) = ($1, $2, $3, $4);
    my $insn = $sm3ttopcode{$mnemonic} | $vd | ($vn << 5) | ($vm << 16) | ($imm << 12);
    return sprintf ".inst\t0x%08x\t//%s %s", $insn, $mnemonic, $arg;
}
# Re-read this script and replay its leading "#" comment block (minus the
# shebang) as "//" comments at the top of the generated assembly; stop at
# the first line that is neither a comment nor blank.
open SELF,$0;
while(<SELF>) {
next if (/^#!/);
last if (!s/^#/\/\// and !/^$/);
print;
}
close SELF;
# Post-process the accumulated template: evaluate `...` escapes and
# rewrite the SM3 mnemonics as raw ".inst" words via the unsm3* helpers.
foreach(split("\n",$code)) {
s/\`([^\`]*)\`/eval($1)/ge;
s/\b(sm3partw[1-2])\s+([qv].*)/unsm3part($1,$2)/ge;
s/\b(sm3ss1)\s+([qv].*)/unsm3ss1($1,$2)/ge;
s/\b(sm3tt[1-2][a-b])\s+([qv].*)/unsm3tt($1,$2)/ge;
print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";

View File

@@ -0,0 +1,228 @@
#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You can obtain
# a copy in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
# Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
# - RISC-V Vector ('V') with VLEN >= 128
# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
# - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
use strict;
use warnings;
use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
# riscv.pm supplies the vector-instruction helper subs that are
# interpolated via @{[...]} in the code template below.
use riscv;
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
# Unlike the ARM scripts there is no translator pipe: write directly to
# $output when one was given, otherwise to stdout.
$output and open STDOUT,">$output";
my $code=<<___;
.text
___
################################################################################
# ossl_hwsm3_block_data_order_zvksh(SM3_CTX *c, const void *p, size_t num);
{
# Register assignments (string names only; interpolated into the
# template below via the riscv.pm helper subs).
my ($CTX, $INPUT, $NUM) = ("a0", "a1", "a2");
my ($V0, $V1, $V2, $V3, $V4, $V5, $V6, $V7,
$V8, $V9, $V10, $V11, $V12, $V13, $V14, $V15,
$V16, $V17, $V18, $V19, $V20, $V21, $V22, $V23,
$V24, $V25, $V26, $V27, $V28, $V29, $V30, $V31,
) = map("v$_",(0..31));
# Fixes over the original template (comment text in the generated
# assembly only; no instruction changed):
#  - the vslidedown comment after round 12 had a lowercase "x" placeholder
#  - the vsm3me comment after round 13 listed w32..w39 in ascending order,
#    inconsistent with every other comment (elements are listed high->low)
$code .= <<___;
.text
.p2align 3
.globl ossl_hwsm3_block_data_order_zvksh
.type ossl_hwsm3_block_data_order_zvksh,\@function
ossl_hwsm3_block_data_order_zvksh:
@{[vsetivli "zero", 8, "e32", "m2", "ta", "ma"]}
# Load initial state of hash context (c->A-H).
@{[vle32_v $V0, $CTX]}
@{[vrev8_v $V0, $V0]}
L_sm3_loop:
# Copy the previous state to v2.
# It will be XOR'ed with the current state at the end of the round.
@{[vmv_v_v $V2, $V0]}
# Load the 64B block in 2x32B chunks.
@{[vle32_v $V6, $INPUT]} # v6 := {w7, ..., w0}
addi $INPUT, $INPUT, 32
@{[vle32_v $V8, $INPUT]} # v8 := {w15, ..., w8}
addi $INPUT, $INPUT, 32
addi $NUM, $NUM, -1
# As vsm3c consumes only w0, w1, w4, w5 we need to slide the input
# 2 elements down so we process elements w2, w3, w6, w7
# This will be repeated for each odd round.
@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w7, ..., w2}
@{[vsm3c_vi $V0, $V6, 0]}
@{[vsm3c_vi $V0, $V4, 1]}
# Prepare a vector with {w11, ..., w4}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w7, ..., w4}
@{[vslideup_vi $V4, $V8, 4]} # v4 := {w11, w10, w9, w8, w7, w6, w5, w4}
@{[vsm3c_vi $V0, $V4, 2]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w11, w10, w9, w8, w7, w6}
@{[vsm3c_vi $V0, $V4, 3]}
@{[vsm3c_vi $V0, $V8, 4]}
@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w15, w14, w13, w12, w11, w10}
@{[vsm3c_vi $V0, $V4, 5]}
@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w23, w22, w21, w20, w19, w18, w17, w16}
# Prepare a register with {w19, w18, w17, w16, w15, w14, w13, w12}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w15, w14, w13, w12}
@{[vslideup_vi $V4, $V6, 4]} # v4 := {w19, w18, w17, w16, w15, w14, w13, w12}
@{[vsm3c_vi $V0, $V4, 6]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w19, w18, w17, w16, w15, w14}
@{[vsm3c_vi $V0, $V4, 7]}
@{[vsm3c_vi $V0, $V6, 8]}
@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w23, w22, w21, w20, w19, w18}
@{[vsm3c_vi $V0, $V4, 9]}
@{[vsm3me_vv $V8, $V6, $V8]} # v8 := {w31, w30, w29, w28, w27, w26, w25, w24}
# Prepare a register with {w27, w26, w25, w24, w23, w22, w21, w20}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w23, w22, w21, w20}
@{[vslideup_vi $V4, $V8, 4]} # v4 := {w27, w26, w25, w24, w23, w22, w21, w20}
@{[vsm3c_vi $V0, $V4, 10]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w27, w26, w25, w24, w23, w22}
@{[vsm3c_vi $V0, $V4, 11]}
@{[vsm3c_vi $V0, $V8, 12]}
@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w31, w30, w29, w28, w27, w26}
@{[vsm3c_vi $V0, $V4, 13]}
@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w39, w38, w37, w36, w35, w34, w33, w32}
# Prepare a register with {w35, w34, w33, w32, w31, w30, w29, w28}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w31, w30, w29, w28}
@{[vslideup_vi $V4, $V6, 4]} # v4 := {w35, w34, w33, w32, w31, w30, w29, w28}
@{[vsm3c_vi $V0, $V4, 14]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w35, w34, w33, w32, w31, w30}
@{[vsm3c_vi $V0, $V4, 15]}
@{[vsm3c_vi $V0, $V6, 16]}
@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w39, w38, w37, w36, w35, w34}
@{[vsm3c_vi $V0, $V4, 17]}
@{[vsm3me_vv $V8, $V6, $V8]} # v8 := {w47, w46, w45, w44, w43, w42, w41, w40}
# Prepare a register with {w43, w42, w41, w40, w39, w38, w37, w36}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w39, w38, w37, w36}
@{[vslideup_vi $V4, $V8, 4]} # v4 := {w43, w42, w41, w40, w39, w38, w37, w36}
@{[vsm3c_vi $V0, $V4, 18]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w43, w42, w41, w40, w39, w38}
@{[vsm3c_vi $V0, $V4, 19]}
@{[vsm3c_vi $V0, $V8, 20]}
@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w47, w46, w45, w44, w43, w42}
@{[vsm3c_vi $V0, $V4, 21]}
@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w55, w54, w53, w52, w51, w50, w49, w48}
# Prepare a register with {w51, w50, w49, w48, w47, w46, w45, w44}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w47, w46, w45, w44}
@{[vslideup_vi $V4, $V6, 4]} # v4 := {w51, w50, w49, w48, w47, w46, w45, w44}
@{[vsm3c_vi $V0, $V4, 22]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w51, w50, w49, w48, w47, w46}
@{[vsm3c_vi $V0, $V4, 23]}
@{[vsm3c_vi $V0, $V6, 24]}
@{[vslidedown_vi $V4, $V6, 2]} # v4 := {X, X, w55, w54, w53, w52, w51, w50}
@{[vsm3c_vi $V0, $V4, 25]}
@{[vsm3me_vv $V8, $V6, $V8]} # v8 := {w63, w62, w61, w60, w59, w58, w57, w56}
# Prepare a register with {w59, w58, w57, w56, w55, w54, w53, w52}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w55, w54, w53, w52}
@{[vslideup_vi $V4, $V8, 4]} # v4 := {w59, w58, w57, w56, w55, w54, w53, w52}
@{[vsm3c_vi $V0, $V4, 26]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w59, w58, w57, w56, w55, w54}
@{[vsm3c_vi $V0, $V4, 27]}
@{[vsm3c_vi $V0, $V8, 28]}
@{[vslidedown_vi $V4, $V8, 2]} # v4 := {X, X, w63, w62, w61, w60, w59, w58}
@{[vsm3c_vi $V0, $V4, 29]}
@{[vsm3me_vv $V6, $V8, $V6]} # v6 := {w71, w70, w69, w68, w67, w66, w65, w64}
# Prepare a register with {w67, w66, w65, w64, w63, w62, w61, w60}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, X, X, w63, w62, w61, w60}
@{[vslideup_vi $V4, $V6, 4]} # v4 := {w67, w66, w65, w64, w63, w62, w61, w60}
@{[vsm3c_vi $V0, $V4, 30]}
@{[vslidedown_vi $V4, $V4, 2]} # v4 := {X, X, w67, w66, w65, w64, w63, w62}
@{[vsm3c_vi $V0, $V4, 31]}
# XOR in the previous state.
@{[vxor_vv $V0, $V0, $V2]}
bnez $NUM, L_sm3_loop # Check if there are any more block to process
L_sm3_end:
@{[vrev8_v $V0, $V0]}
@{[vse32_v $V0, $CTX]}
ret
.size ossl_hwsm3_block_data_order_zvksh,.-ossl_hwsm3_block_data_order_zvksh
___
}
# Emit the accumulated assembly (no xlate pipe on this path) and make
# sure buffered write errors are reported.
print $code;
close STDOUT or die "error closing STDOUT: $!";