diff --git a/Makefile b/Makefile index 59e6187dd..c183a05a1 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,7 @@ update-dependencies: imgs/ol-min: ${LAMBDA_FILES} ${MAKE} -C min-image - docker build -t ol-min min-image + docker build --no-cache -t ol-min min-image touch imgs/ol-min imgs/ol-wasm: imgs/ol-min wasm-image/runtimes/native/src/main.rs diff --git a/call_matrix_cols.csv b/call_matrix_cols.csv new file mode 100644 index 000000000..3b6c4a1fc --- /dev/null +++ b/call_matrix_cols.csv @@ -0,0 +1,2 @@ +,absl-py==1.4.0,aiohttp==3.8.5,aiosignal==1.3.1,alabaster==0.7.13,alembic==1.12.0,anyio==4.0.0,appdirs==1.4.4,argon2-cffi-bindings==21.2.0,argon2-cffi==23.1.0,argparse==1.4.0,arrow==1.2.3,asgiref==3.7.2,astroid==2.15.6,asttokens==2.4.0,astunparse==1.6.3,async-lru==2.0.4,async-timeout==4.0.3,attrs==23.1.0,autopage==0.5.1,babel==2.12.1,backcall==0.2.0,bcrypt==4.0.1,beautifulsoup4==4.12.2,black==23.9.1,bleach==6.0.0,blinker==1.6.2,boto3==1.28.48,botocore==1.31.48,breathe==4.35.0,bs4==0.0.1,cachetools==5.3.1,certifi==2021.10.8,certifi==2022.6.15,certifi==2022.9.24,certifi==2023.7.22,cffi==1.15.1,chardet==3.0.4,chardet==4.0.0,chardet==5.2.0,charset-normalizer==2.0.12,charset-normalizer==2.1.1,charset-normalizer==3.2.0,click==7.1.2,click==8.0.4,click==8.1.7,cliff==4.3.0,cloudpickle==2.2.1,cmd2==2.4.3,colorama==0.4.6,comm==0.1.4,commonmark==0.9.1,contextlib2==21.6.0,contourpy==1.1.0,contourpy==1.1.1,coverage==7.3.1,cryptography==38.0.3,cryptography==41.0.3,cssselect==1.2.0,cycler==0.11.0,cython==3.0.2,debtcollector==2.5.0,debugpy==1.8.0,decorator==5.1.1,defusedxml==0.7.1,deprecation==2.1.0,dill==0.3.7,distlib==0.3.7,distro==1.8.0,django==4.2.5,dm-tree==0.1.8,dnspython==2.4.2,docker==6.1.3,docopt==0.6.2,docutils==0.16,docutils==0.17.1,docutils==0.18.1,docutils==0.19,docutils==0.20.1,dulwich==0.21.6,entrypoints==0.4,et-xmlfile==1.1.0,eventlet==0.33.3,exceptiongroup==1.1.3,executing==1.2.0,fastjsonschema==2.18.0,filelock==3.12.4,fixtures==4.1.0,flake8==6.1.0,flask==2.0.3,flask==2.1.0,flask==2.2.2,flask==2.3.3,fonttools==4.42.1,fqdn==1.5.1,frozenlist==1.4.0,future==0.18.3,ghp-import==2.1.0,gitdb==4.0.10,gitpython==3.1.36,greenlet==2.0.2,grpcio-tools==1.58.0,grpcio==1.58.0,gunicorn==20.1.0,gunicorn==21.2.0,h5py==3.9.0,html5lib==1.1,httplib2==0.22.0,idna==2.10,idna==2.7,idna==3.3,idna==3.4,imageio==2.31.3,imagesize==1.4.1,importlib-metadata==6.8.0,iniconfig==2.0.0,ipykernel==6.25.2,ipython-genutils==0.2.0,ipython==8.15.0,ipywidgets==8.1.1,iso8601==2.0.0,isodate==0.6.1,isoduration==20.11.0,isort==5.12.0,itsdangerous==2.1.2,jax==0.4.14,jaxlib==0.4.14,jedi==0.19.0,jinja2==2.11.3,jinja2==3.0.3,jinja2==3.1.2,jmespath==1.0.1,joblib==1.3.2,json5==0.9.14,jsonpatch==1.33,jsonpointer==2.4,jsonschema-specifications==2023.7.1,jsonschema==4.19.0,jupyter-client==8.3.1,jupyter-console==6.6.3,jupyter-core==5.3.1,jupyter-events==0.7.0,jupyter-lsp==2.2.0,jupyter-server-terminals==0.4.4,jupyter-server==2.7.3,jupyter==1.0.0,jupyterlab-pygments==0.2.2,jupyterlab-server==2.25.0,jupyterlab-widgets==3.0.9,jupyterlab==4.0.6,keystoneauth1==5.3.0,kiwisolver==1.4.5,latexcodec==2.0.1,lazy-object-proxy==1.9.0,livereload==2.6.3,llvmlite==0.40.1,lxml==4.9.3,mako==1.2.4,markdown-it-py==2.2.0,markdown==3.4.4,markupsafe==2.0.1,markupsafe==2.1.3,marshmallow==3.20.1,matplotlib-inline==0.1.6,matplotlib==3.8.0,mccabe==0.7.0,mdit-py-plugins==0.4.0,mdurl==0.1.2,mergedeep==1.3.4,mistune==0.8.4,mistune==3.0.1,mkdocs-material-extensions==1.1.1,mkdocs==1.5.2,ml-dtypes==0.2.0,mock==5.1.0,more-itertools==9.0.0,mpmath==1.3.0,msgpack==1.0.5,multidi
ct==6.0.4,mypy-extensions==1.0.0,nbclient==0.8.0,nbconvert==7.8.0,nbformat==5.9.2,nbsphinx==0.9.3,nest-asyncio==1.5.7,netaddr==0.8.0,networkx==3.1,nose==1.3.7,notebook-shim==0.2.3,notebook==7.0.3,numba==0.57.1,numpy==1.24.4,numpy==1.25.2,numpy==1.26.0,numpydoc==1.5.0,oauth2client==4.1.3,oauthlib==3.2.2,openpyxl==3.1.2,openstackdocstheme==3.2.0,opt-einsum==3.3.0,os-service-types==1.7.0,oslo-config==9.2.0,oslo-i18n==6.1.0,overrides==7.4.0,packaging==21.3,packaging==23.1,pandas==2.1.0,pandocfilters==1.5.0,paramiko==3.3.1,parso==0.8.3,pathspec==0.11.2,pbr==5.11.1,pexpect==4.8.0,pickleshare==0.7.5,pillow==10.0.0,pillow==10.0.1,platformdirs==3.10.0,pluggy==1.3.0,ply==3.11,prettytable==3.9.0,prometheus-client==0.17.1,promise==2.3,prompt-toolkit==3.0.39,protobuf==3.20.3,protobuf==4.24.3,psutil==5.9.5,psycopg2-binary==2.9.7,ptyprocess==0.7.0,pure-eval==0.2.2,py==1.11.0,pyarrow==13.0.0,pyasn1-modules==0.3.0,pyasn1==0.4.8,pyasn1==0.5.0,pybtex==0.24.0,pycodestyle==2.11.0,pycparser==2.21,pydot==1.4.2,pyflakes==3.1.0,pygments==2.16.1,pyjwt==2.4.0,pyjwt==2.8.0,pylint==2.17.5,pymdown-extensions==10.3,pymongo==4.5.0,pynacl==1.5.0,pyopenssl==23.2.0,pyparsing==2.4.7,pyparsing==3.0.9,pyparsing==3.1.1,pyperclip==1.8.2,pyproject-api==1.6.1,pyrsistent==0.19.3,pyserial==3.5,pysocks==1.7.1,pytest-cov==4.1.0,pytest==7.4.2,python-dateutil==2.8.2,python-json-logger==2.0.7,python-slugify==8.0.1,pytz==2023.3.post1,pyyaml-env-tag==0.1,pyyaml==6.0,pyyaml==6.0.1,pyzmq==25.1.1,qtconsole==5.4.4,qtpy==2.4.0,recommonmark==0.7.1,redis==5.0.0,referencing==0.30.2,regex==2023.8.8,reno==4.0.0,requests-oauthlib==1.3.1,requests-toolbelt==0.10.1,requests-toolbelt==1.0.0,requests==2.20.0,requests==2.25.1,requests==2.26.0,requests==2.27.1,requests==2.28.1,requests==2.31.0,rfc3339-validator==0.1.4,rfc3986-validator==0.1.1,rfc3986==2.0.0,rich==12.6.0,rpds-py==0.10.3,rsa==4.9,ruamel-yaml-clib==0.2.7,ruamel-yaml==0.17.32,s3transfer==0.6.2,scikit-learn==1.3.0,scipy==1.11.2,seaborn==0.12.2,send2trash==1.8.2,shapely==2.0.1,simplejson==3.19.1,six==1.16.0,smmap==5.0.0,sniffio==1.3.0,snowballstemmer==2.2.0,soupsieve==2.5,sphinx-autobuild==2021.3.14,sphinx-copybutton==0.5.2,sphinx-gallery==0.14.0,sphinx-rtd-theme==1.3.0,sphinx==4.5.0,sphinx==5.3.0,sphinx==7.2.6,sphinxcontrib-applehelp==1.0.4,sphinxcontrib-applehelp==1.0.7,sphinxcontrib-devhelp==1.0.2,sphinxcontrib-devhelp==1.0.5,sphinxcontrib-htmlhelp==2.0.1,sphinxcontrib-htmlhelp==2.0.4,sphinxcontrib-jquery==4.1,sphinxcontrib-jsmath==1.0.1,sphinxcontrib-qthelp==1.0.3,sphinxcontrib-qthelp==1.0.6,sphinxcontrib-serializinghtml==1.1.5,sphinxcontrib-serializinghtml==1.1.9,sqlalchemy==2.0.20,sqlparse==0.4.4,stack-data==0.6.2,stevedore==5.1.0,sympy==1.12,tabulate==0.9.0,tenacity==8.2.3,termcolor==2.3.0,terminado==0.17.1,testresources==2.0.1,text-unidecode==1.3,threadpoolctl==3.2.0,tinycss2==1.2.1,toml==0.10.2,tomli==2.0.1,tomlkit==0.12.1,toolz==0.12.0,tornado==6.3.3,tox==4.11.3,tqdm==4.66.1,traitlets==5.10.0,typing-extensions==4.4.0,typing-extensions==4.7.1,tzdata==2023.3,tzlocal==5.0.1,unidecode==1.3.6,uri-template==1.3.0,uritemplate==4.1.1,urllib3==1.24.3,urllib3==1.26.12,urllib3==1.26.16,urllib3==1.26.9,urllib3==2.0.4,virtualenv==20.24.5,watchdog==3.0.0,wcwidth==0.2.6,webcolors==1.13,webencodings==0.5.1,websocket-client==1.6.3,werkzeug==2.3.7,wheel==0.37.1,wheel==0.41.2,widgetsnbextension==4.0.9,wrapt==1.15.0,xlrd==2.0.1,xmltodict==0.13.0,yarl==1.9.2,zipp==3.16.2 
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/call_matrix_sample.csv b/call_matrix_sample.csv new file mode 100644 index 000000000..752bc5f86 --- /dev/null +++ b/call_matrix_sample.csv @@ -0,0 +1,2 @@ +,asgiref,blinker,certifi,charset-normalizer,click,contourpy,cycler,django,dnspython,flask,fonttools,greenlet,idna,itsdangerous,jinja2,kiwisolver,markupsafe,matplotlib,mock,numpy,packaging,pandas,pillow,protobuf,pyparsing,pyqt5,pyqt5-qt5,pyqt5-sip,python-dateutil,pytz,requests,scipy,simplejson,six,sqlalchemy,sqlparse,typing-extensions,tzdata,urllib3,werkzeug +2400,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 diff --git a/centroids.json b/centroids.json new file mode 100644 index 000000000..72e1f3aa9 --- /dev/null +++ b/centroids.json @@ -0,0 +1,212 @@ +[ + [ + 0.0612244897959182, + 0.06462585034013613, + 0.08163265306122454, + 0.08163265306122454, + 0.06462585034013613, + 1.0000000000000004, + 1.0000000000000004, + 0.0612244897959182, + 0.04761904761904749, + 0.06462585034013613, + 1.0000000000000004, + 0.06462585034013585, + 0.08163265306122454, + 0.06462585034013613, + 0.10884353741496644, + 1.0000000000000004, + 0.17687074829932, + 1.0000000000000004, + 0.07142857142857155, + 0.9999999999999996, + 1.0000000000000004, + 0.04761904761904753, + 1.0000000000000004, + 0.06802721088435376, + 1.0000000000000004, + 0.09523809523809529, + 0.09523809523809529, + 0.09523809523809529, + 1.0000000000000027, + 0.04761904761904753, + 0.08163265306122454, + 0.09523809523809534, + 0.057823129251700786, + 0.9999999999999978, + 0.06462585034013585, + 0.0612244897959182, + 0.12585034013605437, + 0.04761904761904753, + 0.08163265306122454, + 0.1326530612244896 + ], + [ + 0.16462093862815924, + -1.3877787807814457e-16, + -1.8041124150158794e-16, + -1.8041124150158794e-16, + -1.3877787807814457e-16, + -1.8041124150158794e-16, + -1.8041124150158794e-16, + 0.16462093862815924, + 0.17472924187725636, + -1.3877787807814457e-16, + -1.8041124150158794e-16, + 0.1566787003610105, + -1.8041124150158794e-16, + -1.3877787807814457e-16, + 0.15812274368230983, + -1.8041124150158794e-16, + 0.3155234657039713, + -1.8041124150158794e-16, + 0.16534296028880838, + 0.3234657039711183, + -1.8041124150158794e-16, + 1.2490009027033011e-15, + -1.8041124150158794e-16, + 0.16462093862815813, + -1.8041124150158794e-16, + 0.1537906137184113, + 0.1537906137184113, + 0.1537906137184113, + 5.551115123125783e-16, + 1.2490009027033011e-15, + -1.8041124150158794e-16, + 0.1638989169675092, + 0.17833935018050562, + 0.1725631768953051, + 0.1566787003610105, + 0.16462093862815924, + 0.31046931407942335, + 1.2490009027033011e-15, + -1.8041124150158794e-16, + 0.17256317689530629 + ], + [ + 0.08965517241379298, + 1.000000000000001, + 0.07241379310344814, + 0.07241379310344814, + 1.000000000000001, + 1.942890293094024e-16, + 
1.942890293094024e-16, + 0.08965517241379298, + 0.07931034482758614, + 1.000000000000001, + 1.942890293094024e-16, + 0.058620689655172184, + 0.07241379310344814, + 1.000000000000001, + 1.0000000000000013, + 1.942890293094024e-16, + 1.0000000000000009, + 1.942890293094024e-16, + 0.0517241379310345, + 0.21724137931034537, + 1.942890293094024e-16, + 0.08620689655172409, + 1.942890293094024e-16, + 0.0517241379310345, + 1.942890293094024e-16, + 0.06551724137931049, + 0.06551724137931049, + 0.06551724137931049, + 0.08620689655172387, + 0.08620689655172409, + 0.07241379310344814, + 0.07241379310344831, + 0.0724137931034484, + 0.1862068965517243, + 0.058620689655172184, + 0.08965517241379298, + 0.1482758620689652, + 0.08620689655172409, + 0.07241379310344814, + 1.0000000000000022 + ], + [ + 0.08583690987124448, + 1.8041124150158794e-16, + 1.0000000000000007, + 1.0000000000000007, + 1.8041124150158794e-16, + 1.1102230246251565e-16, + 1.1102230246251565e-16, + 0.08583690987124448, + 0.08154506437768237, + 1.8041124150158794e-16, + 1.1102230246251565e-16, + 0.07296137339055785, + 1.0000000000000007, + 1.8041124150158794e-16, + 0.09012875536480724, + 1.1102230246251565e-16, + 0.16738197424892728, + 1.1102230246251565e-16, + 0.08583690987124465, + 0.14592274678111644, + 1.1102230246251565e-16, + -1.3877787807814457e-16, + 1.1102230246251565e-16, + 0.07725321888412019, + 1.1102230246251565e-16, + 0.09871244635193144, + 0.09871244635193144, + 0.09871244635193144, + -3.0531133177191805e-16, + -1.3877787807814457e-16, + 1.0000000000000007, + 0.08154506437768244, + 0.072961373390558, + 0.11158798283261806, + 0.07296137339055785, + 0.08583690987124448, + 0.1587982832618024, + -1.3877787807814457e-16, + 1.0000000000000007, + 0.07725321888412023 + ], + [ + 0.08724832214765094, + 1.942890293094024e-16, + 0.0536912751677852, + 0.0536912751677852, + 1.942890293094024e-16, + 2.0816681711721685e-16, + 2.0816681711721685e-16, + 0.08724832214765094, + 0.060402684563758274, + 1.942890293094024e-16, + 2.0816681711721685e-16, + 0.07718120805369114, + 0.0536912751677852, + 1.942890293094024e-16, + 0.0738255033557052, + 2.0816681711721685e-16, + 0.12416107382550387, + 2.0816681711721685e-16, + 0.09060402684563759, + 0.9999999999999994, + 2.0816681711721685e-16, + 1.0000000000000009, + 2.0816681711721685e-16, + 0.07382550335570472, + 2.0816681711721685e-16, + 0.080536912751678, + 0.080536912751678, + 0.080536912751678, + 1.0000000000000027, + 1.0000000000000009, + 0.0536912751677852, + 0.11409395973154361, + 0.08724832214765105, + 0.9999999999999978, + 0.07718120805369114, + 0.08724832214765094, + 0.1644295302013421, + 1.0000000000000009, + 0.0536912751677852, + 0.05033557046979842 + ] +] \ No newline at end of file diff --git a/centroids_kmeans.json b/centroids_kmeans.json new file mode 100644 index 000000000..c3da9efc9 --- /dev/null +++ b/centroids_kmeans.json @@ -0,0 +1 @@ +[[0.0697674418604651, 1.0000000000000018, 0.11295681063122925, 0.11295681063122925, 1.0000000000000018, 2.7755575615628914e-17, 2.7755575615628914e-17, 0.0697674418604651, 0.056478405315614655, 1.0000000000000018, 2.7755575615628914e-17, 0.08970099667774092, 0.11295681063122925, 1.0000000000000018, 1.0000000000000018, 2.7755575615628914e-17, 0.9999999999999993, 2.7755575615628914e-17, 0.0697674418604653, 0.18936877076411995, 2.7755575615628914e-17, 0.03322259136212609, 2.7755575615628914e-17, 0.09302325581395357, 2.7755575615628914e-17, 0.04983388704318953, 0.04983388704318953, 0.04983388704318953, 0.03322259136212666, 0.03322259136212609, 
0.11295681063122925, 0.08637873754152822, 0.056478405315614724, 0.13621262458471695, 0.08970099667774092, 0.0697674418604651, 0.15946843853820597, 0.03322259136212609, 0.11295681063122925, 0.9999999999999984], [0.1518438177874185, 5.273559366969494e-16, -6.522560269672795e-16, -6.522560269672795e-16, 5.273559366969494e-16, -1.942890293094024e-16, -1.942890293094024e-16, 0.1518438177874185, 0.18148951554591444, 5.273559366969494e-16, -1.942890293094024e-16, 0.18221258134490254, -6.522560269672795e-16, 5.273559366969494e-16, 0.17353579175705042, -1.942890293094024e-16, 0.3362255965292843, -1.942890293094024e-16, 0.14822848879247985, 0.31525668835864074, -1.942890293094024e-16, 5.273559366969494e-16, -1.942890293094024e-16, 0.15690527838033275, -1.942890293094024e-16, 0.1655820679681848, 0.1655820679681848, 0.1655820679681848, -2.4147350785597155e-15, 5.273559366969494e-16, -6.522560269672795e-16, 0.17353579175704997, 0.15762834417932056, 0.1648590021691998, 0.18221258134490254, 0.1518438177874185, 0.31887201735357984, 5.273559366969494e-16, -6.522560269672795e-16, 0.1836587129428781], [0.1312741312741312, -2.7755575615628914e-16, -3.7470027081099033e-16, -3.7470027081099033e-16, -2.7755575615628914e-16, 2.7755575615628914e-17, 2.7755575615628914e-17, 0.1312741312741312, 0.04633204633204638, -2.7755575615628914e-16, 2.7755575615628914e-17, 0.08880308880308899, -3.7470027081099033e-16, -2.7755575615628914e-16, 0.08494208494208447, 2.7755575615628914e-17, 0.1621621621621618, 2.7755575615628914e-17, 0.06563706563706578, 1.000000000000001, 2.7755575615628914e-17, 1.0000000000000018, 2.7755575615628914e-17, 0.09266409266409273, 2.7755575615628914e-17, 0.06563706563706573, 0.06563706563706573, 0.06563706563706573, 1.0000000000000009, 1.0000000000000018, -3.7470027081099033e-16, 0.07722007722007723, 0.08494208494208494, 1.0000000000000004, 0.08880308880308899, 0.1312741312741312, 0.22007722007722016, 1.0000000000000018, -3.7470027081099033e-16, 0.0772200772200771], [0.0772532188841201, -3.0531133177191805e-16, 0.999999999999998, 0.999999999999998, -3.0531133177191805e-16, 2.7755575615628914e-17, 2.7755575615628914e-17, 0.0772532188841201, 0.09012875536480691, -3.0531133177191805e-16, 2.7755575615628914e-17, 0.07725321888412029, 0.999999999999998, -3.0531133177191805e-16, 0.06866952789699532, 2.7755575615628914e-17, 0.1673819742489267, 2.7755575615628914e-17, 0.0772532188841202, 0.20600858369098696, 2.7755575615628914e-17, 0.07725321888412022, 2.7755575615628914e-17, 0.05579399141630913, 2.7755575615628914e-17, 0.08154506437768239, 0.08154506437768239, 0.08154506437768239, 0.0772532188841202, 0.07725321888412022, 0.999999999999998, 0.03862660944205988, 0.08154506437768247, 0.16309012875536433, 0.07725321888412029, 0.0772532188841201, 0.15450643776824038, 0.07725321888412022, 0.999999999999998, 0.0987124463519313], [0.05246913580246926, 0.07407407407407401, 0.04938271604938249, 0.04938271604938249, 0.07407407407407401, 1.0000000000000016, 1.0000000000000016, 0.05246913580246926, 0.07098765432098769, 0.07407407407407401, 1.0000000000000016, 0.037037037037037326, 0.04938271604938249, 0.07407407407407401, 0.1512345679012343, 1.0000000000000016, 0.1913580246913576, 1.0000000000000016, 0.0956790123456791, 1.0, 1.0000000000000016, 0.07098765432098772, 1.0000000000000016, 0.0833333333333334, 1.0000000000000016, 0.08950617283950618, 0.08950617283950618, 0.08950617283950618, 1.0000000000000007, 0.07098765432098772, 0.04938271604938249, 0.08641975308641955, 0.04320987654321008, 1.0000000000000013, 
0.037037037037037326, 0.05246913580246926, 0.08950617283950646, 0.07098765432098772, 0.04938271604938249, 0.11419753086419743]] diff --git a/centroids_kmodes.json b/centroids_kmodes.json new file mode 100644 index 000000000..491f96088 --- /dev/null +++ b/centroids_kmodes.json @@ -0,0 +1 @@ +[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] diff --git a/dep-trace.json b/dep-trace.json new file mode 100644 index 000000000..7d7a9110e --- /dev/null +++ b/dep-trace.json @@ -0,0 +1,78 @@ +{"deps": ["tzdata", "numpy", "pytz", "python-dateutil"], "name": "pandas", "top": ["pandas"], "type": "package"} +{"deps": [], "name": "tzdata", "top": ["tzdata"], "type": "package"} +{"deps": [], "name": "numpy", "top": ["numpy"], "type": "package"} +{"deps": [], "name": "pytz", "top": ["pytz"], "type": "package"} +{"deps": ["six"], "name": "python-dateutil", "top": ["dateutil"], "type": "package"} +{"deps": [], "name": "six", "top": ["six"], "type": "package"} +{"deps": ["pandas", "tzdata", "numpy", "pytz", "python-dateutil", "six"], "name": "/root/open-lambda/default-ol/worker/code/1001-fn1", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1001-fn1", "type": "invocation"} +{"deps": ["numpy"], "name": "scipy", "top": ["scipy"], "type": "package"} +{"deps": ["scipy", "numpy"], "name": "/root/open-lambda/default-ol/worker/code/1028-fn2", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1028-fn2", "type": "invocation"} +{"deps": ["contourpy", "pyparsing", "fonttools", "kiwisolver", "pillow", "cycler", "packaging", "numpy", "python-dateutil"], "name": "matplotlib", "top": ["matplotlib", 
"pylab"], "type": "package"} +{"deps": ["numpy"], "name": "contourpy", "top": ["contourpy"], "type": "package"} +{"deps": [], "name": "pyparsing", "top": ["pyparsing"], "type": "package"} +{"deps": [], "name": "fonttools", "top": ["fontTools"], "type": "package"} +{"deps": [], "name": "kiwisolver", "top": ["kiwisolver"], "type": "package"} +{"deps": [], "name": "pillow", "top": ["PIL"], "type": "package"} +{"deps": [], "name": "cycler", "top": ["cycler"], "type": "package"} +{"deps": [], "name": "packaging", "top": ["packaging"], "type": "package"} +{"deps": ["matplotlib", "contourpy", "pyparsing", "fonttools", "kiwisolver", "pillow", "cycler", "packaging", "numpy", "python-dateutil", "six"], "name": "/root/open-lambda/default-ol/worker/code/1032-fn3", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1032-fn3", "type": "invocation"} +{"deps": ["greenlet", "typing-extensions"], "name": "sqlalchemy", "top": ["sqlalchemy"], "type": "package"} +{"deps": [], "name": "greenlet", "top": ["greenlet"], "type": "package"} +{"deps": [], "name": "typing-extensions", "top": ["typing_extensions"], "type": "package"} +{"deps": ["sqlalchemy", "greenlet", "typing-extensions"], "name": "/root/open-lambda/default-ol/worker/code/1043-fn4", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1043-fn4", "type": "invocation"} +{"deps": ["asgiref", "sqlparse"], "name": "django", "top": ["django"], "type": "package"} +{"deps": ["typing-extensions"], "name": "asgiref", "top": ["asgiref"], "type": "package"} +{"deps": [], "name": "sqlparse", "top": ["sqlparse"], "type": "package"} +{"deps": ["django", "asgiref", "sqlparse", "typing-extensions"], "name": "/root/open-lambda/default-ol/worker/code/1052-fn5", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1052-fn5", "type": "invocation"} +{"deps": ["jinja2", "click", "werkzeug", "blinker", "itsdangerous"], "name": "flask", "top": ["flask"], "type": "package"} +{"deps": ["markupsafe"], "name": "jinja2", "top": ["jinja2"], "type": "package"} +{"deps": [], "name": "click", "top": ["click"], "type": "package"} +{"deps": ["markupsafe"], "name": "werkzeug", "top": ["werkzeug"], "type": "package"} +{"deps": [], "name": "blinker", "top": ["blinker"], "type": "package"} +{"deps": [], "name": "itsdangerous", "top": ["itsdangerous"], "type": "package"} +{"deps": [], "name": "markupsafe", "top": ["markupsafe"], "type": "package"} +{"deps": ["flask", "jinja2", "click", "werkzeug", "blinker", "itsdangerous", "markupsafe"], "name": "/root/open-lambda/default-ol/worker/code/1061-fn6", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1061-fn6", "type": "invocation"} +{"deps": ["numpy"], "name": "/root/open-lambda/default-ol/worker/code/1077-fn7", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1077-fn7", "type": "invocation"} +{"deps": [], "name": "simplejson", "top": ["simplejson"], "type": "package"} +{"deps": ["simplejson"], "name": "/root/open-lambda/default-ol/worker/code/1080-fn8", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1080-fn8", "type": "invocation"} +{"deps": [], "name": "protobuf", "top": [], "type": "package"} +{"deps": ["protobuf"], "name": "/root/open-lambda/default-ol/worker/code/1084-fn9", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1084-fn9", "type": "invocation"} +{"deps": ["jinja2", "markupsafe"], "name": "/root/open-lambda/default-ol/worker/code/1088-fn10", "type": "function"} +{"name": 
"/root/open-lambda/default-ol/worker/code/1088-fn10", "type": "invocation"} +{"deps": [], "name": "pip", "top": ["pip"], "type": "package"} +{"deps": ["pip"], "name": "/root/open-lambda/default-ol/worker/code/1094-fn11", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1094-fn11", "type": "invocation"} +{"deps": [], "name": "setuptools", "top": ["_distutils_hack", "pkg_resources", "setuptools"], "type": "package"} +{"deps": ["setuptools"], "name": "/root/open-lambda/default-ol/worker/code/1098-fn12", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1098-fn12", "type": "invocation"} +{"deps": ["charset-normalizer", "certifi", "urllib3", "idna"], "name": "requests", "top": ["requests"], "type": "package"} +{"deps": [], "name": "charset-normalizer", "top": ["charset_normalizer"], "type": "package"} +{"deps": [], "name": "certifi", "top": ["certifi"], "type": "package"} +{"deps": [], "name": "urllib3", "top": ["urllib3"], "type": "package"} +{"deps": [], "name": "idna", "top": ["idna"], "type": "package"} +{"deps": ["requests", "charset-normalizer", "certifi", "urllib3", "idna"], "name": "/root/open-lambda/default-ol/worker/code/1105-fn13", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1105-fn13", "type": "invocation"} +{"deps": [], "name": "mock", "top": ["mock"], "type": "package"} +{"deps": ["mock"], "name": "/root/open-lambda/default-ol/worker/code/1116-fn14", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1116-fn14", "type": "invocation"} +{"deps": ["werkzeug", "markupsafe"], "name": "/root/open-lambda/default-ol/worker/code/1120-fn15", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1120-fn15", "type": "invocation"} +{"deps": [], "name": "dnspython", "top": ["dns"], "type": "package"} +{"deps": ["dnspython"], "name": "/root/open-lambda/default-ol/worker/code/1123-fn16", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1123-fn16", "type": "invocation"} +{"deps": ["six"], "name": "/root/open-lambda/default-ol/worker/code/1127-fn17", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1127-fn17", "type": "invocation"} +{"deps": ["pyqt5-qt5", "pyqt5-sip"], "name": "pyqt5", "top": ["PyQt5"], "type": "package"} +{"deps": [], "name": "pyqt5-qt5", "top": [], "type": "package"} +{"deps": [], "name": "pyqt5-sip", "top": [], "type": "package"} +{"deps": ["pyqt5", "pyqt5-qt5", "pyqt5-sip"], "name": "/root/open-lambda/default-ol/worker/code/1133-fn18", "type": "function"} +{"name": "/root/open-lambda/default-ol/worker/code/1133-fn18", "type": "invocation"} diff --git a/min-image/runtimes/python/ol.c b/min-image/runtimes/python/ol.c index 94b6c3e99..6addeb5f2 100644 --- a/min-image/runtimes/python/ol.c +++ b/min-image/runtimes/python/ol.c @@ -11,6 +11,10 @@ static PyObject *ol_unshare(PyObject *module) { int res = unshare(CLONE_NEWUTS|CLONE_NEWPID|CLONE_NEWIPC); + if (res == -1) { + PyErr_SetString(PyExc_RuntimeError, strerror(errno)); + return NULL; + } return Py_BuildValue("i", res); } @@ -368,7 +372,8 @@ static PyObject *ol_enable_seccomp(PyObject *module) { SCMP_SYS(wait4), SCMP_SYS(waitid), SCMP_SYS(waitpid), - SCMP_SYS(writev) + SCMP_SYS(writev), + SCMP_SYS(mount) }; for (int i=0; i<(int)(sizeof(calls)/sizeof(calls[0])); i++) { diff --git a/min-image/runtimes/python/server.py b/min-image/runtimes/python/server.py index c1e6405f7..71e72f4b1 100644 --- a/min-image/runtimes/python/server.py +++ b/min-image/runtimes/python/server.py @@ 
diff --git a/min-image/runtimes/python/server.py b/min-image/runtimes/python/server.py
index c1e6405f7..71e72f4b1 100644
--- a/min-image/runtimes/python/server.py
+++ b/min-image/runtimes/python/server.py
@@ -32,7 +32,7 @@ def recv_fds(sock, msglen, maxfds):
     return msg, list(fds)
 
 def web_server():
-    print(f"server.py: start web server on fd: {file_sock.fileno()}")
+    # print(f"server.py: start web server on fd: {file_sock.fileno()}")
     sys.path.append('/handler')
 
     # TODO: as a safeguard, we should add a mechanism so that the
@@ -73,9 +73,16 @@ def fork_server():
     global file_sock
     file_sock.setblocking(True)
-    print(f"server.py: start fork server on fd: {file_sock.fileno()}")
+    # print(f"server.py: start fork server on fd: {file_sock.fileno()}")
 
     while True:
+        while True:  # reap exited children (non-blocking) so they don't linger as zombies
+            try:
+                pid, _ = os.waitpid(-1, os.WNOHANG)
+                if pid == 0:
+                    break
+            except ChildProcessError:
+                break
         client, _info = file_sock.accept()
         _, fds = recv_fds(client, 8, 2)
         root_fd, mem_cgroup_fd = fds
@@ -128,7 +135,11 @@ def start_container():
     global file_sock
 
     # TODO: if we can get rid of this, we can get rid of the ns module
-    return_val = ol.unshare()
+    try:
+        return_val = ol.unshare()
+    except RuntimeError as e:
+        print("An error occurred in ol.unshare():", e)
+        return_val = 1  # the assert below then aborts container startup
     assert return_val == 0
 
     # we open a new .sock file in the child, before starting the grand
@@ -180,24 +191,24 @@ def main():
         print('seccomp enabled')
 
     bootstrap_path = sys.argv[1]
-    cgroup_fds = 0
-    if len(sys.argv) > 2:
-        cgroup_fds = int(sys.argv[2])
-
-    # join cgroups passed to us. The fact that chroot is called
-    # before we start means we also need to pass FDs to the cgroups we
-    # want to join, because chroot happens before we run, so we can no
-    # longer reach them by paths.
-    pid = str(os.getpid())
-    for i in range(cgroup_fds):
-        # golang guarantees extras start at 3: https://golang.org/pkg/os/exec/#Cmd
-        fd_id = 3 + i
-        with os.fdopen(fd_id, "w") as file:
-            file.write(pid)
-        print(f'server.py: joined cgroup, close FD {fd_id}')
+    # cgroup_fds = 0
+    # if len(sys.argv) > 2:
+    #     cgroup_fds = int(sys.argv[2])
+    #
+    # # join cgroups passed to us. The fact that chroot is called
+    # # before we start means we also need to pass FDs to the cgroups we
+    # # want to join, because chroot happens before we run, so we can no
+    # # longer reach them by paths.
+    # pid = str(os.getpid())
+    # for i in range(cgroup_fds):
+    #     # golang guarantees extras start at 3: https://golang.org/pkg/os/exec/#Cmd
+    #     fd_id = 3 + i
+    #     with os.fdopen(fd_id, "w") as file:
+    #         file.write(pid)
+    #     print(f'server.py: joined cgroup, close FD {fd_id}')
 
     start_container()
 
 if __name__ == '__main__':
-    main()
+    main()
\ No newline at end of file
diff --git a/min-image/spin b/min-image/spin
new file mode 100755
index 000000000..28111e64f
Binary files /dev/null and b/min-image/spin differ
diff --git a/node-1.json b/node-1.json
new file mode 100644
index 000000000..92a9adaf9
--- /dev/null
+++ b/node-1.json
@@ -0,0 +1,5 @@
+{
+    "packages": [],
+    "children": [],
+    "split_generation": 0
+ }
diff --git a/src/boss/boss.go b/src/boss/boss.go
index 5799b2406..6ce634528 100644
--- a/src/boss/boss.go
+++ b/src/boss/boss.go
@@ -7,11 +7,12 @@ import (
 	"log"
 	"net/http"
 	"os"
-	"strconv"
 	"os/signal"
+	"strconv"
 	"syscall"
 
-	"github.com/open-lambda/open-lambda/ol/boss/cloudvm"
+	"github.com/open-lambda/open-lambda/ol/boss/autoscaling"
+	"github.com/open-lambda/open-lambda/ol/boss/cloudvm"
 )
 
 const (
@@ -19,11 +20,15 @@
 	BOSS_STATUS_PATH = "/status"
 	SCALING_PATH     = "/scaling/worker_count"
 	SHUTDOWN_PATH    = "/shutdown"
+	RESTART_PATH     = "/restart"
+	CHANGE_LB_PATH   = "/change_lb"
+	CHANGE_TREE_PATH = "/change_tree"
+	CHANGE_MEM_PATH  = "/change_mem"
 )
 
 type Boss struct {
 	workerPool *cloudvm.WorkerPool
-	autoScaler autoscaling.Scaling
+	autoScaler autoscaling.Scaling
 }
 
 func (b *Boss) BossStatus(w http.ResponseWriter, r *http.Request) {
@@ -36,14 +41,13 @@ func (b *Boss) BossStatus(w http.ResponseWriter, r *http.Request) {
 		b.workerPool.StatusCluster(),
 		b.workerPool.StatusTasks(),
 	}
-
+
 	if b, err := json.MarshalIndent(output, "", "\t"); err != nil {
 		panic(err)
 	} else {
 		w.Write(b)
 	}
-
 }
 
 func (b *Boss) Close(w http.ResponseWriter, r *http.Request) {
@@ -51,6 +55,7 @@
 	if Conf.Scaling == "threshold-scaler" {
 		b.autoScaler.Close()
 	}
+	os.Exit(0)
 }
 
 func (b *Boss) ScalingWorker(w http.ResponseWriter, r *http.Request) {
@@ -92,11 +97,116 @@
 	// STEP 2: adjust target worker count
 	b.workerPool.SetTarget(worker_count)
-
+
 	//respond with status
 	b.BossStatus(w, r)
 }
 
+func (b *Boss) ChangeTree(w http.ResponseWriter, r *http.Request) {
+	// STEP 1: get the new tree path from the POST body, or return an error
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		_, err := w.Write([]byte("POST a tree path to /change_tree\n"))
+		if err != nil {
+			log.Printf("(1) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	contents, err := io.ReadAll(r.Body)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		_, err := w.Write([]byte("could not read body of web request\n"))
+		if err != nil {
+			log.Printf("(2) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	new_tree := string(contents)
+	Conf.Tree_path = new_tree
+
+	b.workerPool.ChangeTree(new_tree)
+}
+
+func (b *Boss) ChangeMem(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		_, err := w.Write([]byte("POST a memory size to /change_mem\n"))
+		if err != nil {
+			log.Printf("(1) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	contents, err := io.ReadAll(r.Body)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		_, err := w.Write([]byte("could not read body of web request\n"))
+		if err != nil {
+			log.Printf("(2) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	newMemStr := string(contents)
+	newMem, err := strconv.Atoi(newMemStr)
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		_, err := w.Write([]byte("invalid integer value\n"))
+		if err != nil {
+			log.Printf("(3) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	Conf.Worker_mem = newMem
+
+	b.workerPool.ChangeMem(newMem)
+	b.BossStatus(w, r)
+}
+
+func (b *Boss) RestartWorkers(w http.ResponseWriter, r *http.Request) {
+	b.workerPool.Restart()
+	b.BossStatus(w, r)
+}
+
+func (b *Boss) ChangeLb(w http.ResponseWriter, r *http.Request) {
+	// STEP 1: get the new load-balancing policy from the POST body, or return an error
+	if r.Method != "POST" {
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		_, err := w.Write([]byte("POST a policy to /change_lb\n"))
+		if err != nil {
+			log.Printf("(1) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	contents, err := io.ReadAll(r.Body)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		_, err := w.Write([]byte("could not read body of web request\n"))
+		if err != nil {
+			log.Printf("(2) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	new_policy := string(contents)
+	Conf.Lb = new_policy
+	err = checkConf()
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		_, err := w.Write([]byte("body of post to /change_lb should be random, sharding, kmeans, kmodes, hash\n"))
+		if err != nil {
+			log.Printf("(3) could not write web response: %s\n", err.Error())
+		}
+		return
+	}
+
+	b.workerPool.ChangePolicy(new_policy)
+}
+
 func BossMain() (err error) {
 	fmt.Printf("WARNING! Boss incomplete (only use this as part of development process).\n")
@@ -118,6 +228,10 @@
 	http.HandleFunc(SCALING_PATH, boss.ScalingWorker)
 	http.HandleFunc(RUN_PATH, boss.workerPool.RunLambda)
 	http.HandleFunc(SHUTDOWN_PATH, boss.Close)
+	http.HandleFunc(RESTART_PATH, boss.RestartWorkers)
+	http.HandleFunc(CHANGE_LB_PATH, boss.ChangeLb)
+	http.HandleFunc(CHANGE_TREE_PATH, boss.ChangeTree)
+	http.HandleFunc(CHANGE_MEM_PATH, boss.ChangeMem)
 
 	// clean up if signal hits us
 	c := make(chan os.Signal, 1)
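The non-blocking reap loop added to fork_server above is a standard pattern for a parent that forks short-lived children. A self-contained sketch (names are illustrative, not from server.py; os.waitstatus_to_exitcode needs Python 3.9+):

import os, time

def reap_children():
    """Collect exited children without blocking the accept loop."""
    while True:
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
        except ChildProcessError:
            break  # this process has no children at all
        if pid == 0:
            break  # children exist, but none have exited yet
        print(f"reaped {pid} (exit code {os.waitstatus_to_exitcode(status)})")

if os.fork() == 0:
    time.sleep(0.1)
    os._exit(7)
time.sleep(0.2)
reap_children()  # prints: reaped <pid> (exit code 7)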
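Taken together, the new routes make the boss reconfigurable at runtime. A quick client sketch (the boss address/port and the memory unit are assumptions, not taken from this patch; the accepted /change_lb values come from the handler's error message):

import urllib.request

BOSS = "http://localhost:5000"  # assumed address of the boss HTTP server

def post(path, body):
    req = urllib.request.Request(BOSS + path, data=body.encode(), method="POST")
    with urllib.request.urlopen(req) as resp:
        return resp.read().decode()

print(post("/scaling/worker_count", "2"))          # existing endpoint
print(post("/change_lb", "kmeans"))                # random|sharding|kmeans|kmodes|hash
print(post("/change_mem", "4096"))                 # per-worker memory (unit assumed MB)
print(post("/change_tree", "/path/to/tree.json"))  # hypothetical tree file path
print(post("/restart", ""))                        # restart all workers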
diff --git a/src/boss/cloudvm/api.go b/src/boss/cloudvm/api.go
index 4a080ef8b..17ad66d58 100644
--- a/src/boss/cloudvm/api.go
+++ b/src/boss/cloudvm/api.go
@@ -18,13 +18,26 @@
 	DESTROYING WorkerState = 3
 )
 
+const (
+	resourceGroupName = "ol-group"
+	location          = "eastus"
+	disk              = "ol-boss-new_OsDisk_1_a3f9be95785c437fabe8819c5807ca13"
+	vnet              = "ol-boss-new-vnet"
+	snapshot          = "ol-boss-new-snapshot"
+)
+
+const (
+	test_path    = "/home/azureuser/paper-tree-cache/analysis/17/"
+	ssh_key_path = "/home/azureuser/.ssh/ol-boss_key.pem"
+)
+
 /*
 Defines the interface for platform-specific functions
 */
 type WorkerPoolPlatform interface {
-	NewWorker(workerId string) *Worker //return new worker struct
-	CreateInstance(worker *Worker)     //create new instance in the cloud platform
-	DeleteInstance(worker *Worker)     //delete cloud platform instance associated with give worker struct
+	NewWorker(workerId string) *Worker   //return a new worker struct
+	CreateInstance(worker *Worker) error //create a new instance in the cloud platform
+	DeleteInstance(worker *Worker) error //delete the cloud platform instance associated with the given worker struct
 	ForwardTask(w http.ResponseWriter, r *http.Request, worker *Worker)
 }
@@ -34,7 +47,7 @@ WorkerPoolPlatform.
 */
 type WorkerPool struct {
 	WorkerPoolPlatform
-	platform  string
+	platform   string
 	worker_cap int
 	sync.Mutex
 	nextId int // the next new worker's id
@@ -42,7 +55,9 @@ type WorkerPool struct {
 	workers []map[string]*Worker // a slice of maps
 	// Slice: index maps to a const WorkerState
 	// Map: key=worker id (string), value=pointer to worker
-	queue chan *Worker // a queue of running workers
+	queue    chan *Worker // a queue of running workers
+	rr_index int
+	rr_queue []*Worker
 	clusterLogFile *os.File
 	taskLogFile    *os.File
@@ -51,6 +66,19 @@
 	totalTask  int32
 	sumLatency int64
 	nLatency   int64
+
+	numGroup  int
+	nextGroup int
+	groups    map[int]*GroupWorker // maps the groupId to its GroupWorker
+
+	taksId int32
+
+	workers_queue map[*Worker]chan string // maps a worker to its channel of requests being handled
+}
+
+type GroupWorker struct {
+	groupId      int                // specifies the group name
+	groupWorkers map[string]*Worker // the workers this group has; every worker in this map must be running
 }
 
 /*
@@ -60,6 +88,11 @@
 type Worker struct {
 	workerId string
 	workerIp string
 	numTask  int32
+	allTaks  int32
 	pool     *WorkerPool
 	state    WorkerState
+	groupId  int
+
+	funcLogFile *os.File
+	funcLog     *log.Logger
 }
diff --git a/src/boss/cloudvm/azure.go b/src/boss/cloudvm/azure.go
index 94a930202..d8115ef5a 100644
--- a/src/boss/cloudvm/azure.go
+++ b/src/boss/cloudvm/azure.go
@@ -4,6 +4,7 @@ import (
 	"bufio"
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"log"
 	"math/rand"
@@ -23,6 +24,8 @@
 var blobName string
 var containerName string
 var err error
 var containerClient azblob.ContainerClient
+var subscriptionId string
+var conf *AzureConfig
 
 func Create(contents string) {
 	url := "https://openlambda.blob.core.windows.net/" //replace with your Azure storage account name
@@ -112,6 +115,15 @@
 func randomString() string {
 	return strconv.Itoa(r.Int())
 }
 
+func AzureCreateVM(worker *Worker) (*AzureConfig, error) {
+	subscriptionId = os.Getenv("AZURE_SUBSCRIPTION_ID")
+	if len(subscriptionId) == 0 {
+		err := errors.New("AZURE_SUBSCRIPTION_ID is not set")
+		return nil, err
+	}
+	return createVM(worker)
+}
+
 func AzureMain(contents string) {
 	fmt.Printf("Azure Blob storage quick start sample\n")
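AzureCreateVM only checks AZURE_SUBSCRIPTION_ID, but NewDefaultAzureCredential (used by connectionAzure in azure_vm.go below) also needs a credential source. A sketch of the environment a deployment might export first (the variable names are the standard azidentity ones; the values are placeholders, and this setup is an assumption, not something stated in the patch):

import os

# subscription consumed by the ARM clients in azure_vm.go
os.environ["AZURE_SUBSCRIPTION_ID"] = "<subscription-guid>"

# one way DefaultAzureCredential can authenticate: a service principal
os.environ["AZURE_TENANT_ID"] = "<tenant-guid>"
os.environ["AZURE_CLIENT_ID"] = "<app-registration-guid>"
os.environ["AZURE_CLIENT_SECRET"] = "<client-secret>"
# alternatives include a managed identity or a prior `az login`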
diff --git a/src/boss/cloudvm/azure_vm.go b/src/boss/cloudvm/azure_vm.go
new file mode 100644
index 000000000..db9a23f73
--- /dev/null
+++ b/src/boss/cloudvm/azure_vm.go
@@ -0,0 +1,912 @@
+package cloudvm
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"log"
+	"net"
+	"net/http"
+	"strconv"
+	"sync"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+)
+
+type ResponseError struct {
+	// ErrorCode is the error code returned by the resource provider if available.
+	ErrorCode string
+
+	// StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants.
+	StatusCode int
+
+	// RawResponse is the underlying HTTP response.
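+	// NOTE: this struct mirrors azcore.ResponseError (ErrorCode, StatusCode,
+	// RawResponse); if the ARM clients below surface that SDK type, it can be
+	// matched with errors.As(err, &respErr) where respErr is a
+	// *azcore.ResponseError, so the local copy may be redundant. (Assumption
+	// based on the field names; not verified against the callers.)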
+ RawResponse *http.Response +} + +func iptoInt(ip string) uint32 { + var long uint32 + ip = ip[:len(ip)-3] // xxxx.xxxx.xxxx.xxxx/24 + binary.Read(bytes.NewBuffer(net.ParseIP(ip).To4()), binary.BigEndian, &long) + return long +} + +func backtoIP4(ipInt int64) string { + // need to do two bit shifting and “0xff” masking + b0 := strconv.FormatInt((ipInt>>24)&0xff, 10) + b1 := strconv.FormatInt((ipInt>>16)&0xff, 10) + b2 := strconv.FormatInt((ipInt>>8)&0xff, 10) + b3 := strconv.FormatInt((ipInt & 0xff), 10) + b3 += "/24" + return b0 + "." + b1 + "." + b2 + "." + b3 +} + +var create_lock sync.Mutex + +func createVM(worker *Worker) (*AzureConfig, error) { + vmName := worker.workerId + diskName := disk + vnetName := vnet + snapshotName := snapshot + conn, err := connectionAzure() + if err != nil { + log.Println(err.Error()) + return conf, err + } + ctx := context.Background() + + log.Println("start creating virtual machine...") + resourceGroup, err := createResourceGroup(ctx, conn) + if err != nil { + log.Println(err.Error()) + return conf, err + } + log.Printf("Created resource group: %s", *resourceGroup.ID) + + newDiskName := vmName + "-disk" + subnetName := vmName + "-subnet" + nsgName := vmName + "-nsg" + nicName := vmName + "-nic" + //publicIPName := vmName + "-public-ip" + + // create snapshot + disk, err := getDisk(ctx, conn, diskName) + if err != nil { + log.Printf("cannot get disk: %s", err) + return conf, err + } + log.Println("Fetched disk:", *disk.ID) + + var snapshot *armcompute.Snapshot + // If the snapshot isn't updated in this iteration + create_lock.Lock() + if !AzureConf.Snapshot_updated { + log.Println("start delete old snapshot") + err = deleteSnapshot(ctx, conn, snapshotName) + if err != nil { + log.Print(err) + return conf, err + } + + log.Println("start create snapshot") + snapshot, err = createSnapshot(ctx, conn, *disk.ID, snapshotName) + if err != nil { + log.Print(err) + return conf, err + } + log.Println("Created snapshot:", *snapshot.ID) + + AzureConf.Snapshot_updated = true + create_lock.Unlock() + } else { + create_lock.Unlock() + // Fetch the snapshot and create the disk + log.Println("start fetch snapshot") + snapshot, err = getSnapshot(ctx, conn, snapshotName) + if err != nil { + log.Print(err) + return conf, err + } + log.Println("fetched snapshot") + } + + log.Println("start create disk") + new_disk, err := createDisk(ctx, conn, *snapshot.ID, newDiskName) + if err != nil { + log.Print(err) + return conf, err + } + log.Println("Created disk:", *new_disk.ID) + + new_vm := new(vmStatus) + // get network + create_lock.Lock() + virtualNetwork, err := getVirtualNetwork(ctx, conn, vnetName) + if err != nil { + log.Println(err.Error()) + return conf, err + } + log.Printf("Fetched virtual network: %s", *virtualNetwork.ID) + new_vm.Virtual_net = *virtualNetwork + + // get subnets + subnets := virtualNetwork.Properties.Subnets + // last subnet addr + lastSubnet64 := int64(iptoInt(*subnets[len(subnets)-1].Properties.AddressPrefix)) + lastSubnet64 += 256 + newSubnetIP := backtoIP4(lastSubnet64) + + subnet, err := createSubnets(ctx, conn, newSubnetIP, vnetName, subnetName) + if err != nil { + log.Println(err.Error()) + return conf, err + } + log.Printf("Created subnet: %s", *subnet.ID) + create_lock.Unlock() + + new_vm.Subnet = *subnet + + /* + publicIP, err := createPublicIP(ctx, conn) + if err != nil { + log.Println("cannot create public IP address:%+v", err) + } + log.Printf("Created public IP address: %s", *publicIP.ID) + conf.Resource_groups.Rgroup[0].Public_ip[vmNum] 
= *publicIP + */ + + // network security group + nsg, err := createNetworkSecurityGroup(ctx, conn, nsgName) + if err != nil { + log.Println(err.Error()) + return conf, err + } + log.Printf("Created network security group: %s", *nsg.ID) + new_vm.Security_group = *nsg + + netWorkInterface, err := createNetWorkInterfaceWithoutIp(ctx, conn, *subnet.ID, *nsg.ID, nicName) + if err != nil { + log.Println(err.Error()) + return conf, err + } + log.Printf("Created network interface: %s", *netWorkInterface.ID) + new_vm.Net_ifc = *netWorkInterface + + networkInterfaceID := netWorkInterface.ID + worker.workerIp = *netWorkInterface.Properties.IPConfigurations[0].Properties.PrivateIPAddress + + // create virtual machine + + virtualMachine, err := createVirtualMachine(ctx, conn, *networkInterfaceID, *new_disk.ID, newDiskName, vmName) + tolerance := 3 + iter := 1 + for err != nil { + log.Println(err.Error()) + // Handle Error + if iter <= tolerance { + log.Println("Iteration smaller than 3, Delete the vm and retry") + err = deleteVirtualMachine(ctx, conn, worker.workerId) + if err != nil { + log.Fatalf("cannot delete virtual machine:%+v", err) + } + log.Println("Successfully deleted the vm, realloc the vm") + virtualMachine, err = createVirtualMachine(ctx, conn, *networkInterfaceID, *new_disk.ID, newDiskName, vmName) + if err != nil { + iter += 1 + } else { + break + } + } else { + log.Println("Iteration greater than 3, this vm cannot be created successfully") + return conf, err + } + } + log.Printf("Created new virual machine: %s", *virtualMachine.ID) + + log.Println("Virtual machine created successfully") + new_vm.Vm = *virtualMachine + new_vm.Status = "Running" + + // log.Printf("Start to restart the vm: %s", *virtualMachine.Name) + // err = restartVirtualMachine(ctx, conn, *virtualMachine.Name) + // if err != nil { + // log.Println(err.Error()) + // return conf, err + // } + // log.Printf("Restart the vm successfully\n") + + create_lock.Lock() + + if conf == nil { + conf = new(AzureConfig) + first_rgroup := new(rgroup) + conf.Resource_groups.Rgroup = append(conf.Resource_groups.Rgroup, *first_rgroup) + } + conf.Resource_groups.Rgroup[0].Resource = *resourceGroup + rg := &conf.Resource_groups.Rgroup[0] + rg.Vms = append(rg.Vms, *new_vm) + conf.Resource_groups.Numrgroup = 1 + conf.Resource_groups.Rgroup[0].Numvm += 1 + + if err := WriteAzureConfig(conf); err != nil { + log.Println(err.Error()) + return conf, err + } + create_lock.Unlock() + + return conf, nil +} + +func cleanupVM(worker *AzureWorker) { + conn, err := connectionAzure() + if err != nil { + log.Fatalf("cannot connection Azure:%+v", err) + } + ctx := context.Background() + + log.Println("start deleting virtual machine...") + err = deleteVirtualMachine(ctx, conn, worker.workerId) + if err != nil { + log.Fatalf("cannot delete virtual machine:%+v", err) + } + log.Println("deleted virtual machine") + + err = deleteDisk(ctx, conn, worker.diskName) + if err != nil { + log.Fatalf("cannot delete disk:%+v", err) + } + log.Println("deleted disk") + + err = deleteNetWorkInterface(ctx, conn, worker.nicName) + if err != nil { + log.Fatalf("cannot delete network interface:%+v", err) + } + log.Println("deleted network interface") + + err = deleteNetworkSecurityGroup(ctx, conn, worker.nsgName) + if err != nil { + log.Fatalf("cannot delete network security group:%+v", err) + } + log.Println("deleted network security group") + + if worker.publicIPName != "" { + err = deletePublicIP(ctx, conn, worker.publicIPName) + if err != nil { + log.Fatalf("cannot delete 
public IP address:%+v", err) + } + log.Println("deleted public IP address") + } + + create_lock.Lock() + err = deleteSubnets(ctx, conn, worker.vnetName, worker.subnetName) + create_lock.Unlock() + if err != nil { + log.Fatalf("cannot delete subnet:%+v", err) + } + log.Println("deleted subnet") + + log.Println("success deleted virtual machine.") +} + +func connectionAzure() (azcore.TokenCredential, error) { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, err + } + return cred, nil +} + +func createResourceGroup(ctx context.Context, cred azcore.TokenCredential) (*armresources.ResourceGroup, error) { + resourceGroupClient, err := armresources.NewResourceGroupsClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armresources.ResourceGroup{ + Location: to.Ptr(location), + Tags: map[string]*string{"sample-rs-tag": to.Ptr("sample-tag")}, // resource group update tags + } + + resp, err := resourceGroupClient.CreateOrUpdate(ctx, resourceGroupName, parameters, nil) + if err != nil { + return nil, err + } + + return &resp.ResourceGroup, nil +} + +func deleteResourceGroup(ctx context.Context, cred azcore.TokenCredential) error { + resourceGroupClient, err := armresources.NewResourceGroupsClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := resourceGroupClient.BeginDelete(ctx, resourceGroupName, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + + return nil +} + +func getVirtualNetwork(ctx context.Context, cred azcore.TokenCredential, vnetName string) (*armnetwork.VirtualNetwork, error) { + vnetClient, err := armnetwork.NewVirtualNetworksClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + resp, err := vnetClient.Get(ctx, resourceGroupName, vnetName, nil) + if err != nil { + return nil, err + } + + return &resp.VirtualNetwork, nil +} + +func createVirtualNetwork(ctx context.Context, cred azcore.TokenCredential, subnetName string, vnetName string) (*armnetwork.VirtualNetwork, error) { + vnetClient, err := armnetwork.NewVirtualNetworksClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armnetwork.VirtualNetwork{ + Location: to.Ptr(location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.1.0.0/16"), // example 10.1.0.0/16 + }, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: to.Ptr(subnetName + "3"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.1.0.0/24"), + }, + }, + }, + }, + } + + pollerResponse, err := vnetClient.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.VirtualNetwork, nil +} + +func deleteVirtualNetWork(ctx context.Context, cred azcore.TokenCredential, vnet string) error { + vnetClient, err := armnetwork.NewVirtualNetworksClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := vnetClient.BeginDelete(ctx, resourceGroupName, vnet, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + + return nil +} + +func createSubnets(ctx context.Context, cred azcore.TokenCredential, addr string, vnetName string, subnetName string) 
(*armnetwork.Subnet, error) { + subnetClient, err := armnetwork.NewSubnetsClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armnetwork.Subnet{ + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr(addr), + }, + } + + pollerResponse, err := subnetClient.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, subnetName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.Subnet, nil +} + +func deleteSubnets(ctx context.Context, cred azcore.TokenCredential, vnet string, subnet string) error { + subnetClient, err := armnetwork.NewSubnetsClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := subnetClient.BeginDelete(ctx, resourceGroupName, vnet, subnet, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + + return nil +} + +func createNetworkSecurityGroup(ctx context.Context, cred azcore.TokenCredential, nsgName string) (*armnetwork.SecurityGroup, error) { + nsgClient, err := armnetwork.NewSecurityGroupsClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armnetwork.SecurityGroup{ + Location: to.Ptr(location), + Properties: &armnetwork.SecurityGroupPropertiesFormat{ + SecurityRules: []*armnetwork.SecurityRule{ + // Windows connection to virtual machine needs to open port 3389,RDP + // inbound + { + Name: to.Ptr("sample_inbound_22"), // + Properties: &armnetwork.SecurityRulePropertiesFormat{ + SourceAddressPrefix: to.Ptr("0.0.0.0/0"), + SourcePortRange: to.Ptr("*"), + DestinationAddressPrefix: to.Ptr("0.0.0.0/0"), + DestinationPortRange: to.Ptr("22"), + Protocol: to.Ptr(armnetwork.SecurityRuleProtocolTCP), + Access: to.Ptr(armnetwork.SecurityRuleAccessAllow), + Priority: to.Ptr[int32](100), + Description: to.Ptr("sample network security group inbound port 22"), + Direction: to.Ptr(armnetwork.SecurityRuleDirectionInbound), + }, + }, + // outbound + { + Name: to.Ptr("sample_outbound_22"), // + Properties: &armnetwork.SecurityRulePropertiesFormat{ + SourceAddressPrefix: to.Ptr("0.0.0.0/0"), + SourcePortRange: to.Ptr("*"), + DestinationAddressPrefix: to.Ptr("0.0.0.0/0"), + DestinationPortRange: to.Ptr("22"), + Protocol: to.Ptr(armnetwork.SecurityRuleProtocolTCP), + Access: to.Ptr(armnetwork.SecurityRuleAccessAllow), + Priority: to.Ptr[int32](100), + Description: to.Ptr("sample network security group outbound port 22"), + Direction: to.Ptr(armnetwork.SecurityRuleDirectionOutbound), + }, + }, + }, + }, + } + + pollerResponse, err := nsgClient.BeginCreateOrUpdate(ctx, resourceGroupName, nsgName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + return &resp.SecurityGroup, nil +} + +func deleteNetworkSecurityGroup(ctx context.Context, cred azcore.TokenCredential, nsg string) error { + nsgClient, err := armnetwork.NewSecurityGroupsClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := nsgClient.BeginDelete(ctx, resourceGroupName, nsg, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + return nil +} + +func createPublicIP(ctx context.Context, cred azcore.TokenCredential, publicIPName string) (*armnetwork.PublicIPAddress, error) { + publicIPAddressClient, 
err := armnetwork.NewPublicIPAddressesClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armnetwork.PublicIPAddress{ + Location: to.Ptr(location), + Properties: &armnetwork.PublicIPAddressPropertiesFormat{ + PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), // Static or Dynamic + }, + } + + pollerResponse, err := publicIPAddressClient.BeginCreateOrUpdate(ctx, resourceGroupName, publicIPName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + return &resp.PublicIPAddress, err +} + +func deletePublicIP(ctx context.Context, cred azcore.TokenCredential, ipName string) error { + publicIPAddressClient, err := armnetwork.NewPublicIPAddressesClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := publicIPAddressClient.BeginDelete(ctx, resourceGroupName, ipName, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + return nil +} + +func createNetWorkInterfaceWithoutIp(ctx context.Context, cred azcore.TokenCredential, subnetID string, networkSecurityGroupID string, nicName string) (*armnetwork.Interface, error) { + nicClient, err := armnetwork.NewInterfacesClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armnetwork.Interface{ + Location: to.Ptr(location), + Properties: &armnetwork.InterfacePropertiesFormat{ + //NetworkSecurityGroup: + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + { + Name: to.Ptr("ipConfig"), + Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), + Subnet: &armnetwork.Subnet{ + ID: to.Ptr(subnetID), + }, + }, + }, + }, + NetworkSecurityGroup: &armnetwork.SecurityGroup{ + ID: to.Ptr(networkSecurityGroupID), + }, + }, + } + + pollerResponse, err := nicClient.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.Interface, err +} + +func createNetWorkInterface(ctx context.Context, cred azcore.TokenCredential, subnetID string, publicIPID string, networkSecurityGroupID string, nicName string) (*armnetwork.Interface, error) { + nicClient, err := armnetwork.NewInterfacesClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armnetwork.Interface{ + Location: to.Ptr(location), + Properties: &armnetwork.InterfacePropertiesFormat{ + //NetworkSecurityGroup: + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + { + Name: to.Ptr("ipConfig"), + Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), + Subnet: &armnetwork.Subnet{ + ID: to.Ptr(subnetID), + }, + PublicIPAddress: &armnetwork.PublicIPAddress{ + ID: to.Ptr(publicIPID), + }, + }, + }, + }, + NetworkSecurityGroup: &armnetwork.SecurityGroup{ + ID: to.Ptr(networkSecurityGroupID), + }, + }, + } + + pollerResponse, err := nicClient.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.Interface, err +} + +func deleteNetWorkInterface(ctx context.Context, cred 
azcore.TokenCredential, nic string) error { + nicClient, err := armnetwork.NewInterfacesClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := nicClient.BeginDelete(ctx, resourceGroupName, nic, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + + return nil +} + +func createVirtualMachine(ctx context.Context, cred azcore.TokenCredential, networkInterfaceID string, new_diskID string, newDiskName string, vmName string) (*armcompute.VirtualMachine, error) { + vmClient, err := armcompute.NewVirtualMachinesClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + parameters := armcompute.VirtualMachine{ + Location: to.Ptr(location), + Identity: &armcompute.VirtualMachineIdentity{ + Type: to.Ptr(armcompute.ResourceIdentityTypeNone), + }, + Properties: &armcompute.VirtualMachineProperties{ + StorageProfile: &armcompute.StorageProfile{ + OSDisk: &armcompute.OSDisk{ + Name: to.Ptr(newDiskName), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesAttach), + Caching: to.Ptr(armcompute.CachingTypesReadWrite), + ManagedDisk: &armcompute.ManagedDiskParameters{ + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesPremiumLRS), // OSDisk type Standard/Premium HDD/SSD + ID: to.Ptr(new_diskID), + }, + OSType: to.Ptr(armcompute.OperatingSystemTypesLinux), + }, + }, + HardwareProfile: &armcompute.HardwareProfile{ + // TODO: make this the user's choice + VMSize: to.Ptr(armcompute.VirtualMachineSizeTypes("Standard_D8s_v5")), // the VM size determines vCPUs, RAM, data disks, and temp storage + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: to.Ptr(networkInterfaceID), + }, + }, + }, + }, + } + + pollerResponse, err := vmClient.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, parameters, nil) + if err != nil { + return nil, err + } + + resp, err := pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.VirtualMachine, nil +} + +func deleteVirtualMachine(ctx context.Context, cred azcore.TokenCredential, name string) error { + vmClient, err := armcompute.NewVirtualMachinesClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := vmClient.BeginDelete(ctx, resourceGroupName, name, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + + return nil +} + +func restartVirtualMachine(ctx context.Context, cred azcore.TokenCredential, vmName string) error { + vmClient, err := armcompute.NewVirtualMachinesClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := vmClient.BeginRestart(ctx, resourceGroupName, vmName, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + + return nil +} + +func getDisk(ctx context.Context, cred azcore.TokenCredential, diskName string) (*armcompute.Disk, error) { + diskClient, err := armcompute.NewDisksClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + resp, err := diskClient.Get( + ctx, + resourceGroupName, + diskName, + nil, + ) + + if err != nil { + return nil, err + } + + return &resp.Disk, nil +} + +func createDisk(ctx context.Context, cred azcore.TokenCredential, source_disk string, newDiskName string) (*armcompute.Disk, error) { + disksClient, err := armcompute.NewDisksClient(subscriptionId, cred, 
nil) + if err != nil { + return nil, err + } + performance_tier := "P15" + + pollerResp, err := disksClient.BeginCreateOrUpdate( + ctx, + resourceGroupName, + newDiskName, + armcompute.Disk{ + Location: to.Ptr(location), + SKU: &armcompute.DiskSKU{ + Name: to.Ptr(armcompute.DiskStorageAccountTypesPremiumLRS), + }, + Properties: &armcompute.DiskProperties{ + CreationData: &armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: to.Ptr(source_disk), + }, + DiskSizeGB: to.Ptr[int32](64), + Tier: &performance_tier, + }, + }, + nil, + ) + if err != nil { + return nil, err + } + + resp, err := pollerResp.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.Disk, nil +} + +func deleteDisk(ctx context.Context, cred azcore.TokenCredential, disk string) error { + diskClient, err := armcompute.NewDisksClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := diskClient.BeginDelete(ctx, resourceGroupName, disk, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + return nil +} + +func getSnapshot(ctx context.Context, cred azcore.TokenCredential, snapshotName string) (*armcompute.Snapshot, error) { + snapshotClient, err := armcompute.NewSnapshotsClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + resp, err := snapshotClient.Get( + ctx, + resourceGroupName, + snapshotName, + nil, + ) + + if err != nil { + return nil, err + } + + return &resp.Snapshot, nil +} + +func createSnapshot(ctx context.Context, cred azcore.TokenCredential, diskID string, snapshotName string) (*armcompute.Snapshot, error) { + snapshotClient, err := armcompute.NewSnapshotsClient(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + + pollerResp, err := snapshotClient.BeginCreateOrUpdate( + ctx, + resourceGroupName, + snapshotName, + armcompute.Snapshot{ + Location: to.Ptr(location), + Properties: &armcompute.SnapshotProperties{ + CreationData: &armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: to.Ptr(diskID), + }, + }, + }, + nil, + ) + if err != nil { + return nil, err + } + + resp, err := pollerResp.PollUntilDone(ctx, nil) + if err != nil { + return nil, err + } + + return &resp.Snapshot, nil +} + +func deleteSnapshot(ctx context.Context, cred azcore.TokenCredential, snapshotName string) error { + snapshotClient, err := armcompute.NewSnapshotsClient(subscriptionId, cred, nil) + if err != nil { + return err + } + + pollerResponse, err := snapshotClient.BeginDelete(ctx, resourceGroupName, snapshotName, nil) + if err != nil { + return err + } + + _, err = pollerResponse.PollUntilDone(ctx, nil) + if err != nil { + return err + } + return nil +} diff --git a/src/boss/cloudvm/azure_worker.go b/src/boss/cloudvm/azure_worker.go new file mode 100644 index 000000000..fba7144b1 --- /dev/null +++ b/src/boss/cloudvm/azure_worker.go @@ -0,0 +1,255 @@ +package cloudvm + +import ( + "errors" + "fmt" + "log" + "net/http" + "os" + "os/exec" + "sync" + "time" +) + +type AzureWorkerPool struct { + workerNum int + workers *map[string]*AzureWorker + nextId int +} + +type AzureWorker struct { + pool *AzureWorkerPool + workerId string + configPosit int + diskName string + vnetName string + subnetName string + nsgName string + nicName string + publicIPName string + privateAddr string + publicAddr string +} + +func NewAzureWorkerPool() (*WorkerPool, error) { + conf, err := 
ReadAzureConfig() + if err != nil { + return nil, err + } + if len(conf.Resource_groups.Rgroup) != 1 { + return nil, errors.New("expected exactly one resource group") + } + num := conf.Resource_groups.Rgroup[0].Numvm + workers := make(map[string]*AzureWorker, num) + pool := &AzureWorkerPool{ + workerNum: num, + workers: &workers, + nextId: num + 1, + } + for i := 0; i < num; i++ { + cur_vm := conf.Resource_groups.Rgroup[0].Vms[i] + worker_i := &AzureWorker{ + pool: pool, + privateAddr: *cur_vm.Net_ifc.Properties.IPConfigurations[0].Properties.PrivateIPAddress, + workerId: *cur_vm.Vm.Name, + configPosit: i, + } + publicWrap := conf.Resource_groups.Rgroup[0].Vms[i].Net_ifc.Properties.IPConfigurations[0].Properties.PublicIPAddress + if publicWrap == nil { + worker_i.publicAddr = "" + } else { + worker_i.publicAddr = *publicWrap.Properties.IPAddress + } + workers[worker_i.workerId] = worker_i + } + parent := &WorkerPool{ + WorkerPoolPlatform: pool, + } + return parent, nil +} + +// Is nextId here useful? I store nextId in the pool +// TODO: maybe store nextId in the config file so that if the boss shuts down, it knows where to resume next time +func (pool *AzureWorkerPool) NewWorker(workerId string) *Worker { + return &Worker{ + workerId: workerId, + workerIp: "", + } +} + +// TODO: make AzureCreateVM multiple-threaded +func (pool *AzureWorkerPool) CreateInstance(worker *Worker) error { + log.Printf("creating an azure worker\n") + conf, err := AzureCreateVM(worker) + if err != nil { + return err + } + + vmNum := conf.Resource_groups.Rgroup[0].Numvm + private := worker.workerIp + newDiskName := worker.workerId + "-disk" + newNicName := worker.workerId + "-nic" + newNsgName := worker.workerId + "-nsg" + subnetName := worker.workerId + "-subnet" + vnetName := "ol-boss-new-vnet" + publicIPName := "" + public := "" + + azworker := &AzureWorker{ + pool: pool, + workerId: worker.workerId, + configPosit: vmNum - 1, + diskName: newDiskName, + vnetName: vnetName, + nicName: newNicName, + nsgName: newNsgName, + subnetName: subnetName, + publicIPName: publicIPName, + privateAddr: private, + publicAddr: public, // If newly created one, this is "" + } + + pool.workerNum += 1 + pool.nextId = pool.workerNum + 1 + + (*pool.workers)[azworker.workerId] = azworker + worker.workerId = azworker.workerId + worker.workerIp = azworker.privateAddr + + return nil +} + +func (worker *Worker) start(firstTime bool) error { + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + + // user, err := user.Current() + + pid_namespace := "echo 500000 | sudo tee /proc/sys/user/max_pid_namespaces" + ipc_namespace := "echo 500000 | sudo tee /proc/sys/user/max_ipc_namespaces" + uts_namespace := "echo 500000 | sudo tee /proc/sys/user/max_uts_namespaces" + + python_path := test_path + + workerNum := len(worker.pool.workers[RUNNING]) + len(worker.pool.workers[STARTING]) + run_deploy_funcs := fmt.Sprintf("sudo python3 write_funcs.py %d", workerNum) + + run_one_time := "sudo python3 run_worker.py" + + run_worker_up := fmt.Sprintf("sudo ./ol worker up -i ol-min -d -o import_cache_tree=%s,worker_url=0.0.0.0,features.warmup=false,limits.mem_mb=500,mem_pool_mb=%d,trace.evictor=true", tree_path, worker_mem) + + var cmd string + if firstTime { + cmd = fmt.Sprintf("%s; %s; %s; %s; cd %s; %s; cd %s; %s; %s; cd %s; %s", + "sudo mount -o rw,remount /sys/fs/cgroup", + pid_namespace, + ipc_namespace, + uts_namespace, + cwd, + "sudo ./ol worker init -i ol-min", + python_path, + run_one_time, + run_deploy_funcs, + cwd, + 
run_worker_up, + ) + } else { + cmd = fmt.Sprintf("%s; cd %s; %s", + "sudo mount -o rw,remount /sys/fs/cgroup", + cwd, + run_worker_up, + ) + } + + tries := 5 + for tries > 0 { + sshcmd := exec.Command("ssh", "-i", ssh_key_path, "azureuser"+"@"+worker.workerIp, "-o", "StrictHostKeyChecking=no", "-C", cmd) + stdoutStderr, err := sshcmd.CombinedOutput() + fmt.Printf("%s\n", stdoutStderr) + if err == nil { + break + } + tries -= 1 + if tries == 0 { + log.Println("failed to ssh into the worker:", sshcmd.String()) + return err + } + time.Sleep(5 * time.Second) + } + return nil +} + +func (worker *Worker) killWorker() { + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + // user, err := user.Current() + cmd := fmt.Sprintf("cd %s; %s", cwd, "sudo ./ol worker down") + log.Printf("Trying to ssh into the worker and kill the OL process") + tries := 10 + for tries > 0 { + log.Printf("debug: %s\n", worker.workerIp) + sshcmd := exec.Command("ssh", "-i", ssh_key_path, "azureuser"+"@"+worker.workerIp, "-o", "StrictHostKeyChecking=no", "-C", cmd) + stdoutStderr, err := sshcmd.CombinedOutput() + fmt.Printf("%s\n", stdoutStderr) + if err == nil { + break + } + tries -= 1 + if tries == 0 { + fmt.Println(sshcmd.String()) + panic(err) + } + time.Sleep(5 * time.Second) + } +} + +var conf_lock sync.Mutex + +func (pool *AzureWorkerPool) DeleteInstance(generalworker *Worker) error { + worker := (*pool.workers)[generalworker.workerId] + + // delete the vm + log.Printf("Trying to delete the VM") + generalworker.killWorker() + cleanupVM(worker) + + // shrink length + conf_lock.Lock() + defer conf_lock.Unlock() + + conf, err := ReadAzureConfig() + if err != nil { + return err + } + conf.Resource_groups.Rgroup[0].Numvm -= 1 + // shrink slice + conf.Resource_groups.Rgroup[0].Vms[worker.configPosit] = conf.Resource_groups.Rgroup[0].Vms[len(conf.Resource_groups.Rgroup[0].Vms)-1] + conf.Resource_groups.Rgroup[0].Vms = conf.Resource_groups.Rgroup[0].Vms[:conf.Resource_groups.Rgroup[0].Numvm] + if len(conf.Resource_groups.Rgroup[0].Vms) > 0 && worker.configPosit < conf.Resource_groups.Rgroup[0].Numvm { + // if all workers have been deleted, don't do this + // if the worker to be deleted is at the end of the list, don't do this + + //TODO: fix this..? 
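+ // The slice shrink above backfilled the deleted slot with the last VM entry, + // so the worker that owns that backfilled entry must have its configPosit + // redirected to the slot it now occupies; that is what the next line does.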
+ (*worker.pool.workers)[*conf.Resource_groups.Rgroup[0].Vms[worker.configPosit].Vm.Name].configPosit = worker.configPosit + } + worker.pool.workerNum -= 1 + if err := WriteAzureConfig(conf); err != nil { + return err + } + log.Printf("Deleted the worker and worker VM successfully\n") + + return nil +} + +func (pool *AzureWorkerPool) ForwardTask(w http.ResponseWriter, r *http.Request, worker *Worker) { + err := forwardTaskHelper(w, r, worker.workerIp) + if err != nil { + log.Printf("%s", err.Error()) + } +} diff --git a/src/boss/cloudvm/config.go b/src/boss/cloudvm/config.go index 16c547bd0..14cece181 100644 --- a/src/boss/cloudvm/config.go +++ b/src/boss/cloudvm/config.go @@ -3,21 +3,39 @@ package cloudvm import ( "encoding/json" "log" + + "io/ioutil" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" ) var GcpConf *GcpConfig +var AzureConf *AzureConfig +var tree_path string +var worker_mem int type GcpConfig struct { - DiskSizeGb int `json:"disk_size_gb"` - MachineType string `json:"machine_type"` + DiskSizeGb int `json:"disk_size_gb"` + MachineType string `json:"machine_type"` +} + +func LoadTreePath(path string) { + tree_path = path +} + +func LoadWorkerMem(mem int) { + worker_mem = mem } func GetGcpConfigDefaults() *GcpConfig { return &GcpConfig{ - DiskSizeGb: 30, + DiskSizeGb: 30, MachineType: "e2-medium", } -} +} func LoadGcpConfig(newConf *GcpConfig) { GcpConf = newConf @@ -39,4 +57,127 @@ func DumpConfStr() string { panic(err) } return string(s) -} \ No newline at end of file +} + +type AzureConfig struct { + Resource_groups rgroups `json:"azure_config"` + Snapshot_updated bool `json:"snapshot_created"` +} + +// TODO: right now we default to having only one resource group +type rgroups struct { + Rgroup []rgroup `json:"resource_groups"` + Numrgroup int `json:"resource_groups_number"` +} + +type rgroup struct { + Resource armresources.ResourceGroup `json:"resource_group"` + Vms []vmStatus `json:"virtual_machine_status"` + Numvm int `json:"vm_number"` + SSHKey string `json:"ssh_key"` +} + +type vmStatus struct { + Status string `json:"virtual_machine_status"` + Vm armcompute.VirtualMachine `json:"virtual_machine"` + Virtual_net armnetwork.VirtualNetwork `json:"virtual_network"` + Subnet armnetwork.Subnet `json:"subnet"` + Public_ip armnetwork.PublicIPAddress `json:"public_ip"` + Security_group armnetwork.SecurityGroup `json:"security_group"` + Net_ifc armnetwork.Interface `json:"network_interface"` +} + +func isExists(path string) (os.FileInfo, bool) { + f, err := os.Stat(path) + return f, err == nil || os.IsExist(err) } + +// if it's a directory +func isDir(path string) (os.FileInfo, bool) { + f, flag := isExists(path) + return f, flag && f.IsDir() +} + +// if it's a file +func isFile(path string) (os.FileInfo, bool) { + f, flag := isExists(path) + return f, flag && !f.IsDir() +} + +func LoadAzureConfig(newConf *AzureConfig) { + AzureConf = newConf +} + +func GetAzureConfigDefaults() *AzureConfig { + rg := &rgroup{ + Numvm: 0, + SSHKey: "~/.ssh/ol-boss_key.pem", + } + + rgs := &rgroups{ + Numrgroup: 1, + } + rgs.Rgroup = append(rgs.Rgroup, *rg) + + conf := &AzureConfig{ + Resource_groups: *rgs, + Snapshot_updated: false, + } + + path := "azure.json" + var content []byte + content, err := json.MarshalIndent(conf, "", "\t") + if err != nil { + panic(err) + } + + if err = ioutil.WriteFile(path, content, 0666); err != nil { + panic(err) + } + return 
conf +} + +func ReadAzureConfig() (*AzureConfig, error) { + path := "azure.json" + _, b := isFile(path) + var file *os.File + var err error + var byteValue []byte + + conf := new(AzureConfig) + + if b { + if file, err = os.Open(path); err != nil { + return nil, err + } + if byteValue, err = ioutil.ReadAll(file); err != nil { + return nil, err + } + if err = json.Unmarshal(byteValue, conf); err != nil { + return nil, err + } + } else { + if file, err = os.Create(path); err != nil { + return nil, err + } + conf = GetAzureConfigDefaults() + } + + err = file.Close() + if err != nil { + return nil, err + } + return conf, nil +} + +func WriteAzureConfig(conf *AzureConfig) error { + path := "azure.json" + var content []byte + + content, err := json.MarshalIndent(conf, "", "\t") + if err != nil { + return err + } + if err = ioutil.WriteFile(path, content, 0666); err != nil { + return err + } + return nil +} diff --git a/src/boss/cloudvm/gcp.go b/src/boss/cloudvm/gcp.go index 6d32abcc1..5e7e6d8b5 100644 --- a/src/boss/cloudvm/gcp.go +++ b/src/boss/cloudvm/gcp.go @@ -68,7 +68,8 @@ func GcpBossTest() { } fmt.Printf("Region: %s\nZone: %s\n", region, zone) - fmt.Printf("STEP 2: lookup instance from IP address\n") + fmt.Printf("STEP 1a: lookup region and zone from metadata server\n") + instance, err := client.GcpInstanceName() if err != nil { panic(err) } @@ -106,18 +107,41 @@ func GcpBossTest() { panic(err) } + fmt.Printf("STEP 6: stop instance\n") + resp, err = client.Wait(client.stopGcpInstance("test-vm")) + start = time.Now() + + if err != nil && resp["error"].(map[string]any)["code"] != "409" { //continue if instance already exists error + fmt.Printf("instance already exists!\n") + client.startGcpInstance("test-vm") + } else if err != nil { + panic(err) + } + + fmt.Printf("snapshot time: %d\n", snapshot_time.Milliseconds()) + fmt.Printf("clone time: %d\n", clone_time.Milliseconds()) + + fmt.Printf("STEP 5: start worker\n") + err = client.RunComandWorker("test-vm", "./ol worker --detach") + if err != nil { + panic(err) + } + fmt.Printf("STEP 6: stop instance\n") resp, err = client.Wait(client.stopGcpInstance("test-vm")) if err != nil { panic(err) } fmt.Printf("STEP 7: delete instance\n") resp, err = client.deleteGcpInstance("test-vm") if err != nil { panic(err) } fmt.Printf("Test Succeeded!\n") } func NewGcpClient(service_account_json string) (*GcpClient, error) { @@ -161,6 +185,7 @@ func (c *GcpClient) RunComandWorker(VMName string, command string) error { } ip, ok := lookup[VMName] + if !ok { fmt.Println(lookup) panic(fmt.Errorf("could not find IP for instance")) } @@ -240,6 +265,7 @@ func (c *GcpClient) get(url string) (rv map[string]any, err error) { defer func() { if err != nil { err = fmt.Errorf("GET to %s failed: %s", url, err.Error()) } }() @@ -466,9 +492,10 @@ func (c *GcpClient) Wait(resp1 map[string]any, err1 error) (resp2 map[string]any func (c *GcpClient) GcpSnapshot(disk string, snapshot_name string) (map[string]any, error) { args := GcpSnapshotArgs{ - Project: c.service_account["project_id"].(string), - Region: c.service_account["region"].(string), - Zone: c.service_account["zone"].(string), + Project: c.service_account["project_id"].(string), + Region: c.service_account["region"].(string), + Zone: c.service_account["zone"].(string), + Disk: disk, SnapshotName: snapshot_name, } @@ -493,9 +520,9 @@ func (c *GcpClient) LaunchGcp(SnapshotName string, VMName string) (map[string]an Zone: c.service_account["zone"].(string), InstanceName: VMName, //SourceImage: "projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20220204", - SnapshotName: SnapshotName, - DiskSizeGb: GcpConf.DiskSizeGb, - MachineType: GcpConf.MachineType, + SnapshotName: SnapshotName, + DiskSizeGb: GcpConf.DiskSizeGb, + MachineType: GcpConf.MachineType, } url := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances", diff --git a/src/boss/cloudvm/gcp_rest.go b/src/boss/cloudvm/gcp_rest.go index 006112912..a750ed512 100644 --- a/src/boss/cloudvm/gcp_rest.go +++ b/src/boss/cloudvm/gcp_rest.go @@ -7,9 +7,9 @@ type GcpLaunchVmArgs struct { Zone string InstanceName string //SourceImage string - SnapshotName string - DiskSizeGb int - MachineType string + SnapshotName string + DiskSizeGb int + MachineType string } type GcpSnapshotArgs struct { @@ -45,6 +45,7 @@ const gcpLaunchVmJSON = `{ "deviceName": "{{.InstanceName}}", "diskEncryptionKey": {}, "initializeParams": { "diskSizeGb": "{{.DiskSizeGb}}", "diskType": "projects/{{.Project}}/zones/{{.Zone}}/diskTypes/pd-balanced", "labels": {}, @@ -60,6 +61,7 @@ const gcpLaunchVmJSON = `{ "guestAccelerators": [], "labels": {}, "machineType": "projects/{{.Project}}/zones/{{.Zone}}/machineTypes/{{.MachineType}}", "metadata": { "items": [] }, diff --git a/src/boss/cloudvm/gcp_worker.go b/src/boss/cloudvm/gcp_worker.go index fb13020a6..fb56a6a06 100644 --- a/src/boss/cloudvm/gcp_worker.go +++ b/src/boss/cloudvm/gcp_worker.go @@ -3,10 +3,10 @@ package cloudvm import ( "fmt" "log" + "net/http" "os" "path/filepath" "strings" - "net/http" ) type GcpWorkerPool struct { @@ -81,12 +81,12 @@ func NewGcpWorkerPool() *WorkerPool { func (pool *GcpWorkerPool) NewWorker(workerId string) *Worker { return &Worker{ - workerId: workerId, - workerIp: "", + workerId: workerId, + workerIp: "", } } -func (pool *GcpWorkerPool) CreateInstance(worker *Worker) { +func (pool *GcpWorkerPool) CreateInstance(worker *Worker) error { client := pool.client fmt.Printf("creating new VM from snapshot\n") @@ -105,12 +105,15 @@ } worker.workerIp = lookup[worker.workerId] + + return nil } -func (pool *GcpWorkerPool) DeleteInstance(worker *Worker) { +func (pool *GcpWorkerPool) DeleteInstance(worker *Worker) error { log.Printf("deleting gcp worker: %s\n", worker.workerId) worker.runCmd("./ol worker down") pool.client.Wait(pool.client.deleteGcpInstance(worker.workerId)) //wait until instance is completely deleted + return nil } func (pool *GcpWorkerPool) ForwardTask(w http.ResponseWriter, r *http.Request, worker *Worker) { diff --git a/src/boss/cloudvm/mock_worker.go b/src/boss/cloudvm/mock_worker.go index 0ad449ce6..5546d221d 100644 --- a/src/boss/cloudvm/mock_worker.go +++ b/src/boss/cloudvm/mock_worker.go @@ -28,12 +28,14 @@ func (pool *MockWorkerPool) NewWorker(workerId string) *Worker { } } -func (pool *MockWorkerPool) CreateInstance(worker *Worker) { +func (pool *MockWorkerPool) CreateInstance(worker *Worker) error { log.Printf("created new mock worker: %s\n", worker.workerId) + return nil } -func (pool *MockWorkerPool) DeleteInstance(worker *Worker) { +func (pool *MockWorkerPool) DeleteInstance(worker *Worker) error { log.Printf("deleted mock worker: %s\n", worker.workerId) + return nil } func (pool *MockWorkerPool) ForwardTask(w http.ResponseWriter, r 
*http.Request, worker *Worker) { diff --git a/src/boss/cloudvm/worker.go b/src/boss/cloudvm/worker.go index 15d9dd1ee..ff649afb9 100644 --- a/src/boss/cloudvm/worker.go +++ b/src/boss/cloudvm/worker.go @@ -1,6 +1,8 @@ package cloudvm import ( + "bufio" + "errors" "fmt" "io" "log" @@ -8,9 +10,12 @@ import ( "os" "os/exec" "os/user" + "strconv" + "strings" "sync/atomic" "time" - "errors" + + "github.com/open-lambda/open-lambda/ol/boss/loadbalancer" ) func NewWorkerPool(platform string, worker_cap int) (*WorkerPool, error) { @@ -23,13 +28,18 @@ func NewWorkerPool(platform string, worker_cap int) (*WorkerPool, error) { var pool *WorkerPool switch { - case platform == "mock": - pool = NewMockWorkerPool() + case platform == "mock": + pool = NewMockWorkerPool() case platform == "gcp": - pool = NewGcpWorkerPool() - default: - return nil, errors.New("invalid cloud platform") - } + pool = NewGcpWorkerPool() + case platform == "azure": + pool, err = NewAzureWorkerPool() + if err != nil { + return nil, err + } + default: + return nil, errors.New("invalid cloud platform") + } pool.nextId = 1 pool.workers = []map[string]*Worker{ @@ -48,6 +58,19 @@ func NewWorkerPool(platform string, worker_cap int) (*WorkerPool, error) { pool.sumLatency = 0 pool.platform = platform pool.worker_cap = worker_cap + // TODO: this is hard-coded; it needs to be made configurable + pool.numGroup = loadbalancer.MaxGroup + pool.groups = make(map[int]*GroupWorker) + pool.nextGroup = 0 + + pool.taksId = 0 + pool.rr_index = 0 + pool.rr_queue = make([]*Worker, 0) + + pool.workers_queue = make(map[*Worker]chan string, 5) + + // This is for traces used to forward tasks + loadbalancer.Traces = loadbalancer.LoadTrace() log.Printf("READY: worker pool of type %s", platform) @@ -102,9 +125,13 @@ func (pool *WorkerPool) startNewWorker() { pool.Lock() log.Printf("starting new worker\n") + nextId := pool.nextId pool.nextId += 1 worker := pool.NewWorker(fmt.Sprintf("worker-%d", nextId)) + logPath := fmt.Sprintf("%s_funcLog.log", worker.workerId) + funcLogFile, err := os.Create(logPath) + if err != nil { + log.Fatalf("cannot create %s: %v", logPath, err) + } + funcLog := log.New(funcLogFile, "", 0) worker.state = STARTING pool.workers[STARTING][worker.workerId] = worker pool.clusterLog.Printf("%s: starting [target=%d, starting=%d, running=%d, cleaning=%d, destroying=%d]", @@ -113,15 +140,42 @@ func (pool *WorkerPool) startNewWorker() { len(pool.workers[RUNNING]), len(pool.workers[CLEANING]), len(pool.workers[DESTROYING])) + worker.funcLog = funcLog + worker.allTaks = 0 + worker.pool = pool + + pool.workers_queue[worker] = make(chan string, 5) pool.Unlock() go func() { // should be able to create multiple instances simultaneously worker.numTask = 1 - pool.CreateInstance(worker) //create new instance - - if pool.platform != "mock" { + err := pool.CreateInstance(worker) //create new instance + if err != nil { + log.Fatal(err) + } + // TODO: need to handle this error, not panic (may use channel?) 
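+ // Worker ids have the form "worker-<n>"; the numeric suffix is parsed below and + // mapped onto one of loadbalancer.MaxGroup groups (n mod MaxGroup, shifted so the + // result always lands in 0..MaxGroup-1).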
+ workerIdDigit, err := strconv.Atoi(getAfterSep(worker.workerId, "-")) + // Assign the worker to the group + // TODO: need to find the busiest worker and double it + var assignedGroup int + if loadbalancer.MaxGroup == 1 { + assignedGroup = 0 + } else { + assignedGroup = workerIdDigit%loadbalancer.MaxGroup - 1 // -1 because starts from 0 + if assignedGroup == -1 { + assignedGroup = loadbalancer.MaxGroup - 1 + } + } + // fmt.Printf("Debug: %d\n", assignedGroup) + if pool.platform == "gcp" { worker.runCmd("./ol worker up -d") // start worker + } else if pool.platform == "azure" { + err = worker.start(true) + if err != nil { + // TODO: Handle error (may use channel?) + log.Fatalln(err) + } } //change state starting -> running @@ -138,8 +192,30 @@ func (pool *WorkerPool) startNewWorker() { len(pool.workers[CLEANING]), len(pool.workers[DESTROYING])) pool.queue <- worker + pool.rr_queue = append(pool.rr_queue, worker) log.Printf("%s ready\n", worker.workerId) worker.numTask = 0 + // update the worker's assigned group + // the random lb doesn't need to assign a worker to a group + if loadbalancer.Lb.LbType != loadbalancer.Random { + worker.groupId = assignedGroup + + // update the group stuff in pool + if _, ok := pool.groups[assignedGroup]; !ok { + // this group hasn't been created + pool.groups[assignedGroup] = &GroupWorker{ + groupId: pool.nextGroup, + groupWorkers: make(map[string]*Worker), + } + if loadbalancer.MaxGroup != 1 { + pool.nextGroup += 1 + pool.nextGroup %= loadbalancer.MaxGroup - 1 + } + } + fmt.Printf("Debug: %d\n", assignedGroup) + group := pool.groups[assignedGroup] + group.groupWorkers[worker.workerId] = worker + } pool.Unlock() @@ -162,8 +238,12 @@ func (pool *WorkerPool) recoverWorker(worker *Worker) { len(pool.workers[RUNNING]), len(pool.workers[CLEANING]), len(pool.workers[DESTROYING])) - - pool.Unlock() + + // group stuff + if loadbalancer.Lb.LbType != loadbalancer.Random { + workerGroup := pool.groups[worker.groupId] + workerGroup.groupWorkers[worker.workerId] = worker + } + pool.Unlock() pool.updateCluster() } @@ -184,6 +264,12 @@ func (pool *WorkerPool) cleanWorker(worker *Worker) { len(pool.workers[CLEANING]), len(pool.workers[DESTROYING])) + // group stuff + if loadbalancer.Lb.LbType != loadbalancer.Random { + workerGroup := pool.groups[worker.groupId] + delete(workerGroup.groupWorkers, worker.workerId) + } + pool.Unlock() go func(worker *Worker) { @@ -241,6 +327,17 @@ func (pool *WorkerPool) detroyWorker(worker *Worker) { // called when worker is been evicted from cleaning or destroying map func (pool *WorkerPool) updateCluster() { + if loadbalancer.Lb.LbType == loadbalancer.Sharding { + pool.Lock() + numShards := loadbalancer.MaxGroup + if len(pool.workers[RUNNING]) <= loadbalancer.MaxGroup { + numShards = len(pool.workers[RUNNING]) + } + fmt.Println(len(pool.workers[RUNNING])) + loadbalancer.UpdateShard(numShards, 2) + pool.Unlock() + } + scaleSize := pool.target - pool.Size() // scaleSize = target - size of cluster if scaleSize > 0 { @@ -284,24 +381,275 @@ } } +func getAfterSep(str string, sep string) string { + res := "" + if idx := strings.LastIndex(str, sep); idx != -1 { + res = str[idx+1:] + } + return res +} + +// getURLComponents parses request URL into its "/" delimited components +func getURLComponents(r *http.Request) []string { + path := r.URL.Path + + // trim prefix + if strings.HasPrefix(path, "/") { + path = path[1:] + } + + // trim trailing "/" + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + components 
:= strings.Split(path, "/") + return components +} + +func readFirstLine(path string) string { + file, err := os.Open(path) + if err != nil { + log.Fatalf("Failed to open file: %s", err) + } + defer file.Close() + + var res string + scanner := bufio.NewScanner(file) + if scanner.Scan() { + res = scanner.Text() // Outputs the first line + } + + // Check for errors during scanning + if err := scanner.Err(); err != nil { + log.Fatalf("Error reading file: %s", err) + } + return res +} + +func isStrExists(str string, list []string) bool { + exists := false + for _, s := range list { + if s == str { + exists = true + break + } + } + return exists +} + +func getPkgs(img string) ([]string, error) { + var pkgs []string + content := loadbalancer.Requirements[img] + scanner := bufio.NewScanner(strings.NewReader(content)) + + for scanner.Scan() { + line := scanner.Text() + line = strings.TrimSpace(line) + + // Ignore comments and empty lines + if strings.HasPrefix(line, "#") || line == "" { + continue + } + + // Exclude the part after '[' or ';' + if idx := strings.IndexAny(line, "[;"); idx != -1 { + line = line[:idx] + line = strings.TrimSpace(line) // Trim space again as splitting might leave whitespace + } + + pkgs = append(pkgs, line) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return pkgs, nil +} + //run lambda function func (pool *WorkerPool) RunLambda(w http.ResponseWriter, r *http.Request) { + var thisTask string starttime := time.Now() + + assignSuccess := false if len(pool.workers[STARTING])+len(pool.workers[RUNNING]) == 0 { w.WriteHeader(http.StatusInternalServerError) + return } + // fmt.Println("Debug 1") + var worker *Worker + var img string + urlParts := getURLComponents(r) + if len(urlParts) < 2 { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("expected invocation format: /run/")) + return + } + img = urlParts[1] + thisTask = img + if loadbalancer.Lb.LbType == loadbalancer.Random { + // fmt.Println("Debug 2") + worker = pool.rr_queue[pool.rr_index] + pool.rr_index = (pool.rr_index + 1) % len(pool.rr_queue) + // fmt.Println("Debug 3") + } else { + // TODO: what if the designated worker isn't up yet? + // Current solution: then randomly choose one that is up + // step 1: get its dependencies + + var pkgs []string + // components represent run[0]/[1]/... + // ergo we want [1] for name of sandbox + // TODO: if user changes the code, one worker will know that, boss cannot know that. How to handle this? 
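+ // Step 1 below parses the function's requirements.txt (cached in + // loadbalancer.Requirements) into a package list; the sharding, hash-zygote, + // and clustering policies all pick a group from these packages (HashFunc + // hashes the image name instead).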
+ + pkgs, err := getPkgs(img) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("failed to get function's dependency packages")) + return + } + + var targetGroups []int + var targetGroup int + // Sharding: get the target group + if loadbalancer.Lb.LbType == loadbalancer.Sharding { + targetGroups, err = loadbalancer.ShardingGetGroup(pkgs) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + } + + if loadbalancer.Lb.LbType == loadbalancer.HashFunc { + targetGroup = loadbalancer.HashFuncGetGroup(img, len(pool.workers[RUNNING])) + // fmt.Println(targetGroup) + targetGroups = append(targetGroups, targetGroup) + } + + if loadbalancer.Lb.LbType == loadbalancer.HashZygote { + targetGroup = loadbalancer.HashZygoteGetGroup(pkgs, len(pool.workers[RUNNING])) + // fmt.Println(targetGroup) + targetGroups = append(targetGroups, targetGroup) + } + + // KMeans/KModes: get the target group + if loadbalancer.Lb.LbType == loadbalancer.KModes || loadbalancer.Lb.LbType == loadbalancer.KMeans { + // get a vector + path := "call_matrix_cols.csv" + firstLine := readFirstLine(path) + matrix_pkgs := strings.Split(firstLine, ",") + matrix_pkgs = matrix_pkgs[1:] + var vec_matrix []int + for _, name := range matrix_pkgs { + if isStrExists(name, pkgs) { + vec_matrix = append(vec_matrix, 1) + } else { + vec_matrix = append(vec_matrix, 0) + } + } + // step 2: get assigned group + if loadbalancer.Lb.LbType == loadbalancer.KMeans { + vec_float := make([]float64, len(vec_matrix)) + for i, v := range vec_matrix { + vec_float[i] = float64(v) + } + targetGroup, err = loadbalancer.KMeansGetGroup(vec_float) + } else if loadbalancer.Lb.LbType == loadbalancer.KModes { + targetGroup, err = loadbalancer.KModesGetGroup(vec_matrix) + } + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + // fmt.Printf("Debug: targetGroup: %d\n", targetGroup) + targetGroups = append(targetGroups, targetGroup) + } + // step 3: pick a worker from the assigned group(s) + assignSuccess = false + // Might be a problem: should I add a lock here? 
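+ // Scan every worker in each candidate group and keep the one with the fewest + // in-flight tasks; if no candidate group has a live worker, fall back to + // pulling one from the worker queue.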
+ smallest_numTask := 100000 + for _, target := range targetGroups { + // fmt.Printf("Debug0: target %d\n", target) + if group, ok := pool.groups[target]; ok { // this group exists + // fmt.Println(len(group.groupWorkers)) + if len(group.groupWorkers) > 0 { + for _, thisWorker := range group.groupWorkers { + if int(thisWorker.numTask) < smallest_numTask { + smallest_numTask = int(thisWorker.numTask) + worker = thisWorker + assignSuccess = true + } + } + } + } + } + // if assign to a worker failed, randomly pick one + if !assignSuccess { + fmt.Println("assign to a group (Shard/KMeans/KModes) failed") + worker = <-pool.queue + pool.queue <- worker + } + + } + // fmt.Println("Debug 4") + // fmt.Printf("Debug: function %s sent to %s\n", thisTask, assigned) + + // a simple load balancer based on worker's processed tasks + assigned := worker.workerId + var smallWorker *Worker + var smallWorkerTask int32 + smallWorkerTask = 10000 + for _, curWorker := range pool.workers[RUNNING] { + if curWorker.numTask < smallWorkerTask { + smallWorkerTask = curWorker.numTask + smallWorker = curWorker + } + } + if smallWorkerTask < (worker.numTask - 5) { + worker = smallWorker + assigned = worker.workerId + } + + // fmt.Printf("Debug: function %s assigned to %s\n", thisTask, assigned) + + // another load balancer implementation + // insert the request's name to the designated queue + // if it's full, randomly pick another one that's not full + + assignTime := time.Since(starttime).Microseconds() + + // fmt.Println("Debug 5") - worker := <-pool.queue - pool.queue <- worker atomic.AddInt32(&worker.numTask, 1) atomic.AddInt32(&pool.totalTask, 1) + atomic.AddInt32(&worker.allTaks, 1) pool.ForwardTask(w, r, worker) + // fmt.Println("Debug 6") + atomic.AddInt32(&worker.numTask, -1) atomic.AddInt32(&pool.totalTask, -1) latency := time.Since(starttime).Milliseconds() + endtime := time.Now() + startFormat := starttime.Format("15:04:05.0000") + endFormat := endtime.Format("15:04:05.0000") + + // pool.Lock() + if loadbalancer.Lb.LbType == loadbalancer.Random { + worker.funcLog.Printf("{\"workernum\": %d, \"policy\": %d, \"task\": \"%s\", \"start\": \"%s\", \"end\": \"%s\", \"time\": %d, \"assignTime\": %d, \"assign\": \"Random\", \"assigned\": \"Random\"}\n", len(pool.workers[RUNNING]), loadbalancer.Lb.LbType, thisTask, startFormat, endFormat, latency, assignTime) + } else { + if assignSuccess { + worker.funcLog.Printf("{\"workernum\": %d, \"policy\": %d, \"task\": \"%s\", \"start\": \"%s\", \"end\": \"%s\", \"time\": %d, \"assignTime\": %d, \"assign\": \"Success\", \"assigned\": \"%s\", \"assignedIP\": \"%s\"}\n", len(pool.workers[RUNNING]), loadbalancer.Lb.LbType, thisTask, startFormat, endFormat, latency, assignTime, assigned, worker.workerIp) + } else { + worker.funcLog.Printf("{\"workernum\": %d, \"policy\": %d, \"task\": \"%s\", \"start\": \"%s\", \"end\": \"%s\", \"time\": %d, \"assignTime\": %d, \"assign\": \"Unsuccess\", \"assigned\": \"%s\", \"assignedIP\": \"%s\"}\n", len(pool.workers[RUNNING]), loadbalancer.Lb.LbType, thisTask, startFormat, endFormat, latency, assignTime, assigned, worker.workerIp) + } + } + // pool.Unlock() atomic.AddInt64(&pool.sumLatency, latency) atomic.AddInt64(&pool.nLatency, 1) @@ -339,7 +687,12 @@ tries := 10 for tries > 0 { - sshcmd := exec.Command("ssh", user.Username+"@"+w.workerIp, "-o", "StrictHostKeyChecking=no", "-C", cmd) + var sshcmd *exec.Cmd + if w.pool.platform == "azure" { + sshcmd = exec.Command("ssh", "-i", 
AzureConf.Resource_groups.Rgroup[0].SSHKey, user.Username+"@"+w.workerIp, "-o", "StrictHostKeyChecking=no", "-C", cmd) + } else if w.pool.platform == "gcp" { + sshcmd = exec.Command("ssh", user.Username+"@"+w.workerIp, "-o", "StrictHostKeyChecking=no", "-C", cmd) + } stdoutStderr, err := sshcmd.CombinedOutput() log.Printf("%s\n", stdoutStderr) if err == nil { @@ -390,8 +743,121 @@ func (pool *WorkerPool) StatusCluster() map[string]int { return output } +// restart OL process in all workers +func (pool *WorkerPool) Restart() { + for _, cur_worker := range pool.workers[RUNNING] { + cur_worker := cur_worker + + cur_worker.killWorker() + cur_worker.start(false) + + if loadbalancer.Lb.LbType != loadbalancer.Random { + workerIdDigit, _ := strconv.Atoi(getAfterSep(cur_worker.workerId, "-")) + var assignedGroup int + if loadbalancer.MaxGroup == 1 { + assignedGroup = 0 + } else { + assignedGroup = workerIdDigit%loadbalancer.MaxGroup - 1 // -1 because starts from 0 + if assignedGroup == -1 { + assignedGroup = loadbalancer.MaxGroup - 1 + } + } + + cur_worker.groupId = assignedGroup + + // update the group stuff in pool + if _, ok := pool.groups[assignedGroup]; !ok { + // this group hasn't been created + pool.groups[assignedGroup] = &GroupWorker{ + groupId: pool.nextGroup, + groupWorkers: make(map[string]*Worker), + } + if loadbalancer.MaxGroup != 1 { + pool.nextGroup += 1 + pool.nextGroup %= loadbalancer.MaxGroup - 1 + } + } + fmt.Printf("Debug: %d\n", assignedGroup) + group := pool.groups[assignedGroup] + group.groupWorkers[cur_worker.workerId] = cur_worker + + } + + } +} + +func (pool *WorkerPool) ChangeTree(tree string) { + pool.Lock() + // if tree_path == tree { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.Lb.LbType, loadbalancer.MaxGroup, tree) + pool.Unlock() + + pool.Restart() + + pool.updateCluster() +} + +func (pool *WorkerPool) ChangeMem(mem int) { + pool.Lock() + LoadWorkerMem(mem) + pool.Unlock() + + pool.Restart() + + pool.updateCluster() +} + +func (pool *WorkerPool) ChangePolicy(policy string) { + pool.Lock() + if policy == "random" { + // if loadbalancer.Lb.LbType == loadbalancer.Random { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.Random, loadbalancer.MaxGroup, tree_path) + } + if policy == "sharding" { + // if loadbalancer.Lb.LbType == loadbalancer.Sharding { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.Sharding, loadbalancer.MaxGroup, tree_path) + } + if policy == "kmeans" { + // if loadbalancer.Lb.LbType == loadbalancer.KMeans { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.KMeans, loadbalancer.MaxGroup, tree_path) + } + if policy == "kmodes" { + // if loadbalancer.Lb.LbType == loadbalancer.KModes { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.KModes, loadbalancer.MaxGroup, tree_path) + } + if policy == "hashfunc" { + // if loadbalancer.Lb.LbType == loadbalancer.Hash { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.HashFunc, loadbalancer.MaxGroup, tree_path) + } + if policy == "hashzygote" { + // if loadbalancer.Lb.LbType == loadbalancer.Hash { + // return + // } + loadbalancer.InitLoadBalancer(loadbalancer.HashZygote, loadbalancer.MaxGroup, tree_path) + } + pool.Unlock() + + pool.Restart() + + pool.updateCluster() +} + // forward request to worker // TODO: this is kept for other platforms +var client = &http.Client{} + func forwardTaskHelper(w http.ResponseWriter, req *http.Request, workerIp string) error { host := fmt.Sprintf("%s:%d", workerIp, 5000) //TODO: read from 
config req.URL.Scheme = "http" @@ -399,7 +865,6 @@ func forwardTaskHelper(w http.ResponseWriter, req *http.Request, workerIp string req.Host = host req.RequestURI = "" - client := http.Client{} resp, err := client.Do(req) if err != nil { http.Error(w, err.Error(), http.StatusBadGateway) diff --git a/src/boss/config.go b/src/boss/config.go index d08452647..fe025fd37 100644 --- a/src/boss/config.go +++ b/src/boss/config.go @@ -5,7 +5,10 @@ import ( "fmt" "io/ioutil" "log" + "os" + "github.com/open-lambda/open-lambda/ol/boss/cloudvm" + "github.com/open-lambda/open-lambda/ol/boss/loadbalancer" ) var Conf *Config @@ -16,22 +19,47 @@ type Config struct { API_key string `json:"api_key"` Boss_port string `json:"boss_port"` Worker_Cap int `json:"worker_cap"` - Gcp *cloudvm.GcpConfig `json:"gcp"` + Azure cloudvm.AzureConfig `json:"azure"` + Gcp cloudvm.GcpConfig `json:"gcp"` + Lb string `json:"lb"` + MaxGroup int `json:"max_group"` + Tree_path string `json:"tree_path"` + Worker_mem int `json:"worker_mem"` } func LoadDefaults() error { + olPath, err := os.Getwd() + if err != nil { + log.Println("Error getting working directory:", err) + return err + } + tree_path := fmt.Sprintf("%s/default-zygote-40.json", olPath) + Conf = &Config{ Platform: "mock", Scaling: "manual", - API_key: "abc", // TODO: autogenerate a random key + API_key: "abc", // TODO: autogenerate a random key Boss_port: "5000", - Worker_Cap: 4, - Gcp: cloudvm.GetGcpConfigDefaults(), + Worker_Cap: 20, + Azure: *cloudvm.GetAzureConfigDefaults(), + Gcp: *cloudvm.GetGcpConfigDefaults(), + Lb: "random", + MaxGroup: 5, + Tree_path: tree_path, + Worker_mem: 32768, } return checkConf() } +func Max(x int, y int) int { + if x > y { + return x + } + + return y +} + // ParseConfig reads a file and tries to parse it as a JSON string to a Config // instance. func LoadConf(path string) error { @@ -45,7 +73,32 @@ func LoadConf(path string) error { return fmt.Errorf("could not parse config (%v): %v\n", path, err.Error()) } - cloudvm.LoadGcpConfig(Conf.Gcp) + cloudvm.LoadTreePath(Conf.Tree_path) + cloudvm.LoadWorkerMem(Conf.Worker_mem) + if Conf.Platform == "gcp" { + cloudvm.LoadGcpConfig(&Conf.Gcp) + } else if Conf.Platform == "azure" { + cloudvm.LoadAzureConfig(&Conf.Azure) + } + + if Conf.Lb == "random" { + loadbalancer.InitLoadBalancer(loadbalancer.Random, Conf.MaxGroup, Conf.Tree_path) + } + if Conf.Lb == "sharding" { + loadbalancer.InitLoadBalancer(loadbalancer.Sharding, Conf.MaxGroup, Conf.Tree_path) + } + if Conf.Lb == "kmeans" { + loadbalancer.InitLoadBalancer(loadbalancer.KMeans, Conf.MaxGroup, Conf.Tree_path) + } + if Conf.Lb == "kmodes" { + loadbalancer.InitLoadBalancer(loadbalancer.KModes, Conf.MaxGroup, Conf.Tree_path) + } + if Conf.Lb == "hashfunc" { + loadbalancer.InitLoadBalancer(loadbalancer.HashFunc, Conf.MaxGroup, Conf.Tree_path) + } + if Conf.Lb == "hashzygote" { + loadbalancer.InitLoadBalancer(loadbalancer.HashZygote, Conf.MaxGroup, Conf.Tree_path) + } return checkConf() } @@ -54,26 +107,11 @@ func checkConf() error { if Conf.Scaling != "manual" && Conf.Scaling != "threshold-scaler" { return fmt.Errorf("Scaling type '%s' not implemented", Conf.Scaling) } - - return nil -} - -// Dump prints the Config as a JSON string. 
-func DumpConf() { - s, err := json.Marshal(Conf) - if err != nil { - panic(err) + if Conf.Lb != "random" && Conf.Lb != "sharding" && Conf.Lb != "kmeans" && Conf.Lb != "kmodes" && Conf.Lb != "hashfunc" && Conf.Lb != "hashzygote" { + return fmt.Errorf("%s is not implemented", Conf.Lb) } - log.Printf("CONFIG = %v\n", string(s)) -} -// DumpStr returns the Config as an indented JSON string. -func DumpConfStr() string { - s, err := json.MarshalIndent(Conf, "", "\t") - if err != nil { - panic(err) - } - return string(s) + return nil } // Save writes the Config as an indented JSON to path with 644 mode. diff --git a/src/boss/loadbalancer/config.go b/src/boss/loadbalancer/config.go new file mode 100644 index 000000000..b721e0ebf --- /dev/null +++ b/src/boss/loadbalancer/config.go @@ -0,0 +1,89 @@ +package loadbalancer + +import ( + "io/ioutil" + "log" + "os" + "path/filepath" +) + +const ( + Random = 0 + KMeans = 1 + KModes = 2 + Sharding = 3 + HashZygote = 4 + HashFunc = 5 +) + +var tree_path string + +var MaxGroup int +var Lb *LoadBalancer +var Requirements map[string]string + +type LoadBalancer struct { + LbType int +} + +func loadRequirements(root string) error { + + // Walk through the directory + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Check if it's a directory + if info.IsDir() && path != root { + requirementsPath := filepath.Join(path, "requirements.txt") + + // Read the contents of requirements.txt if it exists + if _, err := os.Stat(requirementsPath); err == nil { + content, err := ioutil.ReadFile(requirementsPath) + if err != nil { + return err + } + + dirName := filepath.Base(path) + Requirements[dirName] = string(content) + } + } + return nil + }) + + return err +} + +func InitLoadBalancer(lbType int, maxGroup int, path string) { + tree_path = path + if lbType != Random { + // read requirements.txt into a data structure + Requirements = make(map[string]string) + err := loadRequirements("default-ol/registry/") + if err != nil { + log.Fatal(err) + } + if lbType == Sharding { + if err := GetRoot(); err != nil { + log.Fatal(err) + } + } + if lbType == HashZygote { + if err := GetRoot(); err != nil { + log.Fatal(err) + } + initZygoteHasher() + } + if lbType == HashFunc { + initFuncHasher() + } + } + Lb = &LoadBalancer{ + LbType: lbType, + } + MaxGroup = maxGroup +} diff --git a/src/boss/loadbalancer/hashFunc.go b/src/boss/loadbalancer/hashFunc.go new file mode 100644 index 000000000..900aa26a2 --- /dev/null +++ b/src/boss/loadbalancer/hashFunc.go @@ -0,0 +1,31 @@ +package loadbalancer + +import ( + "crypto/sha256" + "math/big" +) + +func hashString(input string) int { + hasherMutex.Lock() // Lock the mutex before using the hasher + defer hasherMutex.Unlock() // Unlock the mutex when the function exits + + hasher.Reset() + hasher.Write([]byte(input)) + hashBytes := hasher.Sum(nil) + + bigIntHash := new(big.Int).SetBytes(hashBytes).Int64() + if bigIntHash < 0 { + bigIntHash = -bigIntHash + } + return int(bigIntHash) +} + +func HashFuncGetGroup(img string, running int) int { + hashInt := hashString(img) + group := hashInt % running + return group +} + +func initFuncHasher() { + hasher = sha256.New() +} diff --git a/src/boss/loadbalancer/hashZygote.go b/src/boss/loadbalancer/hashZygote.go new file mode 100644 index 000000000..78d20fa0e --- /dev/null +++ b/src/boss/loadbalancer/hashZygote.go @@ -0,0 +1,46 @@ +package loadbalancer + +import ( 
+ "crypto/sha256" + "encoding/binary" + "hash" + "sync" +) + +var ( + hasher hash.Hash + hasherMutex sync.Mutex +) + +func hashInt(input int) int { + hasherMutex.Lock() // Lock the mutex before using the hasher + defer hasherMutex.Unlock() // Unlock the mutex when the function exits + + hasher.Reset() + buf := make([]byte, binary.MaxVarintLen64) + binary.LittleEndian.PutUint64(buf, uint64(input)) + + sum := sha256.Sum256(buf) + // Take the first few bytes to fit into an int, ensuring it's always positive + var truncatedHash int + if size := binary.Size(truncatedHash); size == 64/8 { + // 64-bit architecture + truncatedHash = int(binary.LittleEndian.Uint64(sum[:8]) &^ (1 << 63)) + } else { + // 32-bit architecture + truncatedHash = int(binary.LittleEndian.Uint32(sum[:4]) &^ (1 << 31)) + } + + return truncatedHash +} + +func HashZygoteGetGroup(pkgs []string, running int) int { + node := root.Lookup(pkgs) + hashInt := hashInt(node.SplitGeneration) + group := hashInt % running + return group +} + +func initZygoteHasher() { + hasher = sha256.New() +} diff --git a/src/boss/loadbalancer/kmeanslb.go b/src/boss/loadbalancer/kmeanslb.go new file mode 100644 index 000000000..02fedab27 --- /dev/null +++ b/src/boss/loadbalancer/kmeanslb.go @@ -0,0 +1,58 @@ +package loadbalancer + +import ( + "encoding/json" + "io/ioutil" + "math" + "os" +) + +type Point []float64 + +func loadCentroids(filename string) ([]Point, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + byteValue, _ := ioutil.ReadAll(file) + + var centroids []Point + json.Unmarshal(byteValue, ¢roids) + // for _, centroid := range centroids { + // fmt.Println(len(centroid)) + // } + return centroids, nil +} + +func assignToCluster(p Point, centroids []Point) int { + minDist := math.MaxFloat64 + minIdx := 0 + for idx, centroid := range centroids { + // fmt.Println(len(centroid), len(p)) + dist := distance(p, centroid) + if dist < minDist { + minDist = dist + minIdx = idx + } + } + return minIdx +} + +func distance(p1, p2 Point) float64 { + sum := 0.0 + for i := range p1 { + delta := p1[i] - p2[i] + sum += delta * delta + } + return math.Sqrt(sum) +} + +func KMeansGetGroup(pkgs []float64) (int, error) { + centroids, error := loadCentroids("centroids_kmeans.json") + + // Test the clustering with a new data point + cluster := assignToCluster(pkgs, centroids) + return cluster, error +} diff --git a/src/boss/loadbalancer/kmodeslb.go b/src/boss/loadbalancer/kmodeslb.go new file mode 100644 index 000000000..f0ac21f6f --- /dev/null +++ b/src/boss/loadbalancer/kmodeslb.go @@ -0,0 +1,48 @@ +package loadbalancer + +import ( + "encoding/json" + "io/ioutil" + "math" +) + +func hammingDistance(a, b []int) int { + distance := 0 + for i := range a { + if a[i] != b[i] { + distance++ + } + } + return distance +} + +func predictCluster(centroids [][]int, point []int) int { + minDistance := math.MaxInt64 + cluster := -1 + for i, centroid := range centroids { + distance := hammingDistance(centroid, point) + if distance < minDistance { + minDistance = distance + cluster = i + } + } + return cluster +} + +func KModesGetGroup(pkgs []int) (int, error) { + // Load centroids from JSON file + data, err := ioutil.ReadFile("centroids_kmodes.json") + if err != nil { + return -1, err + } + + var centroids [][]int + err = json.Unmarshal(data, ¢roids) + if err != nil { + return -1, err + } + + // Predict cluster + cluster := predictCluster(centroids, pkgs) + return cluster, nil +} diff --git 
a/src/boss/loadbalancer/sharding.go b/src/boss/loadbalancer/sharding.go new file mode 100644 index 000000000..3abc266d6 --- /dev/null +++ b/src/boss/loadbalancer/sharding.go @@ -0,0 +1,290 @@ +package loadbalancer + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "sort" + "strings" +) + +type Node struct { + Direct []string `json:"direct"` + Packages []string `json:"packages"` + Children []*Node `json:"children"` + SplitGeneration int `json:"split_generation"` + Count int `json:"count"` + ParentInt int `json:"parent"` + + SubtreeCount int + Parent *Node + Shards []int +} + +var root *Node + +// BySubtreeCount implements sort.Interface for []*Node based on the SubtreeCount field. +type BySubtreeCount []*Node + +func (a BySubtreeCount) Len() int { return len(a) } +func (a BySubtreeCount) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a BySubtreeCount) Less(i, j int) bool { return a[i].SubtreeCount > a[j].SubtreeCount } + +func splitNodes(nodes []*Node, n int) ([][]*Node, []int) { + // Sort the nodes by subtree_count in descending order + sort.Sort(BySubtreeCount(nodes)) + + // Initialize n sets + sets := make([][]*Node, n) + setSums := make([]int, n) // To keep track of the sum of subtree_count for each set + + // Distribute nodes into sets + for _, node := range nodes { + // Find the set with the smallest sum + minSetIdx := 0 + for i := 1; i < n; i++ { + if setSums[i] < setSums[minSetIdx] { + minSetIdx = i + } + } + // Add the current node to the selected set + sets[minSetIdx] = append(sets[minSetIdx], node) + // Update the sum of the selected set + setSums[minSetIdx] += node.SubtreeCount + } + + return sets, setSums +} + +func splitTree(n int, m int) [][][]*Node { + var nodes []*Node + nodes = append(nodes, root.Children...) + + keepSplit := true + var sets [][]*Node + var setSums []int + depth := 0 + var setsSumsDict [][][]*Node + + for keepSplit { + depth++ + if depth > m { + break + } + keepSplit = false + sets, setSums = splitNodes(nodes, n) + fmt.Println(len(sets)) + minSum := min(setSums) + setsSumsDict = [][][]*Node{} + + for i, set := range sets { + setSum := setSums[i] + if depth < m && float64(setSum) > 1.2*float64(minSum) { + keepSplit = true + for _, node := range set { + nodes = removeNode(nodes, node) + nodes = append(nodes, node.Children...) + } + } else { + setsSumsDict = append(setsSumsDict, [][]*Node{set, {&Node{SubtreeCount: setSum}}}) + } + } + } + + return setsSumsDict +} + +func min(sums []int) int { + minValue := sums[0] + for _, v := range sums { + if v < minValue { + minValue = v + } + } + return minValue +} + +func removeNode(nodes []*Node, target *Node) []*Node { + var result []*Node + for _, node := range nodes { + if node != target { + result = append(result, node) + } + } + return result +} + +// contains checks if a slice contains a specific element. 
+func contains(slice []int, element int) bool { + for _, item := range slice { + if item == element { + return true + } + } + return false +} + +func (n *Node) appendToParents(i int) { + for node := n; node != nil; node = node.Parent { + if !contains(node.Shards, i) { + node.Shards = append(node.Shards, i) + } + } +} + +func (n *Node) appendToSubtree(i int) { + if !contains(n.Shards, i) { + n.Shards = append(n.Shards, i) + } + for _, child := range n.Children { + child.appendToSubtree(i) + } +} + +func (n *Node) clearShards() { + n.Shards = make([]int, 0) + for _, child := range n.Children { + child.clearShards() + } +} + +func UpdateShard(n, m int) { + // Call splitTree to get the sets + if n == 0 { + return + } + root.clearShards() + sets := splitTree(n, m) + + // Add these sets to the global shardLists + + for i, setSum := range sets { + sum := setSum[1][0].SubtreeCount + set := setSum[0] + + subtreeCounts := make([]string, len(set)) + splitGenerations := make([]string, len(set)) + for j, node := range set { + subtreeCounts[j] = fmt.Sprintf("%d", node.SubtreeCount) + splitGenerations[j] = fmt.Sprintf("%d", node.SplitGeneration) + // for node's parent: append i to shards field + // for node's children: append i to shards field + node.appendToParents(i) + node.appendToSubtree(i) + } + + fmt.Printf("Set %d has a sum of %d and contains nodes with subtree_counts: [%s] with ids: [%s]\n", i+1, sum, strings.Join(subtreeCounts, ", "), strings.Join(splitGenerations, ", ")) + } + fmt.Println() + // bfs(root) +} + +func updateSubtreeCount(node *Node) int { + // Base case: if the node has no children, its subtree_count is just its own count + if len(node.Children) == 0 { + node.SubtreeCount = node.Count + return node.Count + } + + // Start with the current node's count + totalCount := node.Count + + // Recursively update the count for all children + for _, child := range node.Children { + totalCount += updateSubtreeCount(child) + } + + // After the total count for all children is calculated, update the current node's subtree_count + node.SubtreeCount = totalCount + + return totalCount +} + +// setParents traverses the tree and sets each node's Parent field. +func setParents(root *Node, generationToNode map[int]*Node) { + if root == nil { + return + } + + // Map the current node's SplitGeneration to the node itself. + generationToNode[root.SplitGeneration] = root + + // Set the Parent for each child and recurse. + for _, child := range root.Children { + child.Parent = generationToNode[child.ParentInt] + setParents(child, generationToNode) + } +} + +func GetRoot() error { + // Read the JSON file + // TODO: not to hardcode + fileContent, err := ioutil.ReadFile(tree_path) + if err != nil { + return err + } + + // Unmarshal the JSON content into the Node struct + rootNode := Node{} + err = json.Unmarshal(fileContent, &rootNode) + if err != nil { + return err + } + root = &rootNode + + generationToNode := make(map[int]*Node) + + // Set the parent nodes using the map and recursive traversal. 
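(Aside: a toy version of the parent wiring, subtree roll-up, and shard propagation performed here, with a stripped-down stand-in for Node; nothing below is part of the patch:)

package main

import "fmt"

type node struct {
	count, subtree int
	parent         *node
	children       []*node
	shards         []int
}

// rollup wires parent pointers and folds every count into subtree,
// mirroring what setParents and updateSubtreeCount do together.
func rollup(n *node) int {
	n.subtree = n.count
	for _, c := range n.children {
		c.parent = n
		n.subtree += rollup(c)
	}
	return n.subtree
}

func main() {
	leaf := &node{count: 2}
	mid := &node{count: 3, children: []*node{leaf}}
	root := &node{count: 1, children: []*node{mid}}
	fmt.Println(rollup(root)) // 6: every count folded into the root

	// Give shard 0 to mid: walk up for the ancestors (appendToParents)
	// and down for the whole subtree (appendToSubtree).
	for n := mid; n != nil; n = n.parent {
		n.shards = append(n.shards, 0)
	}
	var down func(*node)
	down = func(n *node) {
		for _, c := range n.children {
			c.shards = append(c.shards, 0)
			down(c)
		}
	}
	down(mid)
	fmt.Println(root.shards, mid.shards, leaf.shards) // [0] [0] [0]
}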
+ setParents(root, generationToNode) + // fmt.Println(root.Children[0].Children[0].Parent.SplitGeneration) + + // update the subtree_count + updateSubtreeCount(root) + + return nil +} + +func (n *Node) Lookup(required_pkgs []string) *Node { + for _, pkg := range n.Packages { + found := false + for _, req := range required_pkgs { + if pkg == req { + found = true + break + } + } + if !found { + return nil + } + } + + for _, child := range n.Children { + bestNode := child.Lookup(required_pkgs) + if bestNode != nil { + return bestNode + } + } + + return n +} + +func ShardingGetGroup(pkgs []string) ([]int, error) { + node := root.Lookup(pkgs) + // fmt.Println("Debug3: ", node.SplitGeneration, node.Shards) + return node.Shards, nil +} + +// func bfs(root *Node) { +// queue := []*Node{root} // Initialize the queue with the root node + +// for len(queue) > 0 { +// current := queue[0] // Get the first node in the queue +// queue = queue[1:] // Dequeue the current node +// fmt.Println(current.SplitGeneration, current.Shards) // Print the Shards of the current node + +// // Enqueue the children of the current node +// for _, child := range current.Children { +// queue = append(queue, child) +// } +// } +// } diff --git a/src/boss/loadbalancer/trace.go b/src/boss/loadbalancer/trace.go new file mode 100644 index 000000000..22c8a0d13 --- /dev/null +++ b/src/boss/loadbalancer/trace.go @@ -0,0 +1,50 @@ +package loadbalancer + +import ( + "bufio" + "encoding/json" + "log" + "os" +) + +var Traces *TraceList + +type Trace struct { + Deps []string `json:"deps"` + Name string `json:"name"` + Top []string `json:"top"` + Type string `json:"type"` +} + +type TraceList struct { + Data []Trace +} + +func LoadTrace() *TraceList { + filePath := "/home/azureuser/open-lambda/dep-trace.json" + file, err := os.Open(filePath) + if err != nil { + log.Fatalf("Failed to open file: %s", err) + } + defer file.Close() + + var data []Trace + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + var record Trace + if err := json.Unmarshal([]byte(line), &record); err != nil { + log.Fatalf("Error parsing line as JSON: %s", err) + } + data = append(data, record) + } + + if err := scanner.Err(); err != nil { + log.Fatalf("Error reading file: %s", err) + } + + res := &TraceList{ + Data: data, + } + return res +} diff --git a/src/common/config.go b/src/common/config.go index 35804e85c..04839a6de 100644 --- a/src/common/config.go +++ b/src/common/config.go @@ -75,6 +75,8 @@ type FeaturesConfig struct { Import_cache string `json:"import_cache"` Downsize_paused_mem bool `json:"downsize_paused_mem"` Enable_seccomp bool `json:"enable_seccomp"` + Warmup bool `json:"warmup"` + COW bool `json:"COW"` } type TraceConfig struct { @@ -144,8 +146,9 @@ func LoadDefaults(olPath string) error { if err != nil { return err } - totalMb := uint64(in.Totalram) * uint64(in.Unit) / 1024 / 1024 - memPoolMb := Max(int(totalMb-500), 500) + // totalMb := uint64(in.Totalram) * uint64(in.Unit) / 1024 / 1024 + // memPoolMb := Max(int(totalMb-500), 500) + memPoolMb := 30000 Conf = &Config{ Worker_dir: workerDir, @@ -165,14 +168,17 @@ func LoadDefaults(olPath string) error { Procs: 10, Mem_mb: 50, CPU_percent: 100, - Max_runtime_default: 30, - Installer_mem_mb: Max(250, Min(500, memPoolMb/2)), + Max_runtime_default: 90, + Installer_mem_mb: 500, Swappiness: 0, }, Features: FeaturesConfig{ Import_cache: "tree", Downsize_paused_mem: true, Enable_seccomp: true, + Warmup: false, + COW: true, + Reuse_cgroups: true, }, Trace: TraceConfig{ 
Cgroups: false, diff --git a/src/common/stats.go b/src/common/stats.go index c1b7167a0..1f227bfc4 100644 --- a/src/common/stats.go +++ b/src/common/stats.go @@ -4,11 +4,11 @@ import ( "bytes" "container/list" "fmt" + "log" "runtime" "strconv" "sync" "time" - "log" ) type RollingAvg struct { diff --git a/src/go.mod b/src/go.mod index 20c86a50e..80744c8ba 100644 --- a/src/go.mod +++ b/src/go.mod @@ -48,7 +48,7 @@ require ( golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect golang.org/x/mod v0.8.0 // indirect golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect + golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.7.0 // indirect golang.org/x/tools v0.6.0 // indirect ) diff --git a/src/go.sum b/src/go.sum index fafe0de52..03adedcb0 100644 --- a/src/go.sum +++ b/src/go.sum @@ -890,8 +890,8 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/src/vendor/golang.org/x/sys/execabs/execabs.go b/src/vendor/golang.org/x/sys/execabs/execabs.go index b981cfbb4..3bf40fdfe 100644 --- a/src/vendor/golang.org/x/sys/execabs/execabs.go +++ b/src/vendor/golang.org/x/sys/execabs/execabs.go @@ -63,7 +63,7 @@ func LookPath(file string) (string, error) { } func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { // exec.Command was called with a bare binary name and // exec.LookPath returned a path which is not absolute. 
// Set cmd.lookPathErr and clear cmd.Path so that it diff --git a/src/vendor/golang.org/x/sys/execabs/execabs_go118.go b/src/vendor/golang.org/x/sys/execabs/execabs_go118.go index 6ab5f5089..2000064a8 100644 --- a/src/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ b/src/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -7,6 +7,12 @@ package execabs +import "os/exec" + func isGo119ErrDot(err error) bool { return false } + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/src/vendor/golang.org/x/sys/execabs/execabs_go119.go b/src/vendor/golang.org/x/sys/execabs/execabs_go119.go index 46c5b525e..f364b3418 100644 --- a/src/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/src/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -15,3 +15,7 @@ import ( func isGo119ErrDot(err error) bool { return errors.Is(err, exec.ErrDot) } + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/src/vendor/golang.org/x/sys/unix/ioctl.go b/src/vendor/golang.org/x/sys/unix/ioctl.go index 1c51b0ec2..7ce8dd406 100644 --- a/src/vendor/golang.org/x/sys/unix/ioctl.go +++ b/src/vendor/golang.org/x/sys/unix/ioctl.go @@ -8,7 +8,6 @@ package unix import ( - "runtime" "unsafe" ) @@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error { // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. @@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlGetInt performs an ioctl operation which gets an integer value @@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. 
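(Context for the vendored golang.org/x/sys changes in this region: the bump replaces ioctl(fd, req, uintptr(unsafe.Pointer(p))) call chains, which needed runtime.KeepAlive, with ioctlPtr and ptracePtr wrappers that accept an unsafe.Pointer and convert to uintptr only inside the Syscall expression, the pattern Go's unsafe rules require and the TODO(#58351) comments below refer to. A Linux-only sketch of that discipline, independent of the vendored code:)

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// winsize mirrors the kernel's struct winsize.
type winsize struct {
	Row, Col, Xpixel, Ypixel uint16
}

func main() {
	var ws winsize
	// The uintptr conversion is written inside the Syscall expression
	// itself, so &ws stays visibly live for the duration of the call,
	// which is the discipline the new ioctlPtr wrappers centralize.
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
		uintptr(syscall.Stdout), uintptr(syscall.TIOCGWINSZ),
		uintptr(unsafe.Pointer(&ws)))
	if errno != 0 {
		fmt.Println("ioctl failed:", errno)
		return
	}
	fmt.Printf("terminal: %d rows x %d cols\n", ws.Row, ws.Col)
}

Run in a terminal this prints the current window size; the point is only where the uintptr conversion sits.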
func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } func IoctlGetTermios(fd int, req uint) (*Termios, error) { var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/src/vendor/golang.org/x/sys/unix/ioctl_zos.go b/src/vendor/golang.org/x/sys/unix/ioctl_zos.go index 5384e7d91..6532f09af 100644 --- a/src/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/src/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -27,9 +27,7 @@ func IoctlSetInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -51,13 +49,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7..39dba6ca6 100644 --- a/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/src/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { + return ptrace1Ptr(request, pid, addr, data) +} diff --git a/src/vendor/golang.org/x/sys/unix/ptrace_ios.go b/src/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a01..9ea66330a 100644 --- a/src/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/src/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + return ENOTSUP +} diff --git a/src/vendor/golang.org/x/sys/unix/syscall_aix.go b/src/vendor/golang.org/x/sys/unix/syscall_aix.go index 2db1b51e9..d9f5544cc 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -411,6 +409,7 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return 
-1 } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/src/vendor/golang.org/x/sys/unix/syscall_bsd.go b/src/vendor/golang.org/x/sys/unix/syscall_bsd.go index eda42671f..7705c3270 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/src/vendor/golang.org/x/sys/unix/syscall_darwin.go b/src/vendor/golang.org/x/sys/unix/syscall_darwin.go index 192b071b3..7064d6eba 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -14,7 +14,6 @@ package unix import ( "fmt" - "runtime" "syscall" "unsafe" ) @@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { - err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) - runtime.KeepAlive(ctlInfo) - return err + return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. @@ -394,16 +392,14 @@ type IfreqMTU struct { func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) - err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. 
func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { - err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) - runtime.KeepAlive(ifreq) - return err + return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL diff --git a/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9..9fa879806 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec9963..f17b8c526 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a41111a79..221efc26b 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d50b9dc25..5bdde03e4 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ptrace(request int, pid int, addr uintptr, data int) (err error) +//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) 
= SYS_PTRACE func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) @@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) { } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0) +} + +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + } + if countin > 0 { + _ = out[:countin] // check bounds + ioDesc.Addr = &out[0] + } else if out != nil { + ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero)) + } + ioDesc.SetLen(countin) + + err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0) + return int(ioDesc.Len), err } func PtraceLwpEvents(pid int, enable int) (err error) { return ptrace(PT_LWP_EVENTS, pid, 0, enable) } -func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) { + return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0) } func PtraceSingleStep(pid int) (err error) { return ptrace(PT_STEP, pid, 1, 0) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 6a91d471d..b8da51004 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 48110a0ab..47155c483 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 52f1d4b75..08932093f 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 5537ee4f2..d151a0d0e 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index 164abd5d2..d5cd64b37 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/src/vendor/golang.org/x/sys/unix/syscall_hurd.go b/src/vendor/golang.org/x/sys/unix/syscall_hurd.go index 4ffb64808..381fd4673 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -20,3 +20,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { } return } + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/src/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/vendor/golang.org/x/sys/unix/syscall_linux.go index 5443dddd4..973533153 100644 --- a/src/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) } +func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error { + return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) { } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it @@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary. n := 0 if addr%SizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro for len(out) > 0 { // We use an internal buffer to guarantee alignment. // It's not documented if this is necessary, but we're paranoid. - err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c n := 0 if addr%SizeofPtr != 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Trailing edge. 
if len(data) > 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1696,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1709,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) { func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long - err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data)) msg = uint(data) return } @@ -2154,6 +2158,14 @@ func isGroupMember(gid int) bool { return false } +func isCapDacOverrideSet() bool { + hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3} + data := [2]CapUserData{} + err := Capget(&hdr, &data[0]) + + return err == nil && data[0].Effective&(1< 0 { diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 77479d458..112906562 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2e966d4d7..55f5abfe5 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, 
new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d65a7c0fa..d39651c2b 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 6f0b97c6d..ddb740868 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go 
b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index e1c23b527..09a53a616 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 36ea3a55b..430cb24de 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -379,6 +379,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(arg) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 79f738996..8e1d9c8f6 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fb161f3a2..21c695040 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := 
Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 4c8ac993a..298168f90 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 76dd8ec4f..68b8bd492 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index caeb807bd..0b0f910e1 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index a05e5f4ff..48ff5de75 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index b2da8e50c..2452a641d 100644 --- 
a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 048b2655e..5e35600a6 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 6f33e37e7..b04cef1a1 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 330cf7f7a..47a07ee0c 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 5f24de0d9..573378fdb 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 78d4a4240..4873a1e5d 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ 
b/src/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -657,6 +657,17 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) diff --git a/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index f2079457c..07bfe2ef9 100644 --- a/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/src/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -267,6 +267,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index d9c78cdcb..29dc48337 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -362,7 +362,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 26991b165..0a89b2890 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -367,7 +367,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index f8324e7e7..c8666bb15 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -350,7 +350,7 @@ type FpExtendedPrecision struct { type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 4220411f3..88fb48a88 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -347,7 +347,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 0660fd45c..698dc975e 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ 
b/src/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -348,7 +348,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7d9fc8f1c..ca84727cf 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -456,36 +456,60 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 } type CanFilter struct { @@ -528,7 +552,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0x68 + SizeofTCPInfo = 0xf0 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1043,6 +1067,7 @@ const ( PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 + PerfBitWriteBackward = CBitFieldMaskBit27 ) const ( @@ -1239,7 +1264,7 @@ type TCPMD5Sig struct { Flags uint8 Prefixlen uint8 Keylen uint16 - _ uint32 + Ifindex int32 Key [80]uint8 } @@ -1939,7 +1964,11 @@ const ( NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 + NFT_MSG_NEWFLOWTABLE = 0x16 + NFT_MSG_GETFLOWTABLE = 0x17 + NFT_MSG_DELFLOWTABLE = 0x18 + NFT_MSG_GETRULE_RESET = 0x19 + NFT_MSG_MAX = 0x1a NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2443,9 +2472,11 @@ const ( SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + SOF_TIMESTAMPING_BIND_PHC = 0x8000 + SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x8000 - SOF_TIMESTAMPING_MASK = 0xffff + SOF_TIMESTAMPING_LAST = 0x10000 + SOF_TIMESTAMPING_MASK = 0x1ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3265,7 +3296,7 @@ const ( DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae DEVLINK_ATTR_NESTED_DEVLINK = 0xaf DEVLINK_ATTR_SELFTESTS = 0xb0 - DEVLINK_ATTR_MAX = 0xb0 + DEVLINK_ATTR_MAX 
= 0xb3 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3281,7 +3312,8 @@ const ( DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 + DEVLINK_PORT_FN_ATTR_CAPS = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ) type FsverityDigest struct { @@ -3572,7 +3604,8 @@ const ( ETHTOOL_MSG_MODULE_SET = 0x23 ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 - ETHTOOL_MSG_USER_MAX = 0x25 + ETHTOOL_MSG_RSS_GET = 0x26 + ETHTOOL_MSG_USER_MAX = 0x26 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3611,7 +3644,8 @@ const ( ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 - ETHTOOL_MSG_KERNEL_MAX = 0x25 + ETHTOOL_MSG_RSS_GET_REPLY = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x26 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3679,7 +3713,8 @@ const ( ETHTOOL_A_LINKSTATE_SQI_MAX = 0x4 ETHTOOL_A_LINKSTATE_EXT_STATE = 0x5 ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 0x6 - ETHTOOL_A_LINKSTATE_MAX = 0x6 + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 0x7 + ETHTOOL_A_LINKSTATE_MAX = 0x7 ETHTOOL_A_DEBUG_UNSPEC = 0x0 ETHTOOL_A_DEBUG_HEADER = 0x1 ETHTOOL_A_DEBUG_MSGMASK = 0x2 @@ -4409,7 +4444,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x140 + NL80211_ATTR_MAX = 0x141 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4552,6 +4587,7 @@ const ( NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 NL80211_ATTR_SURVEY_INFO = 0x54 NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TD_BITMAP = 0x141 NL80211_ATTR_TDLS_ACTION = 0x88 NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c @@ -5752,3 +5788,25 @@ const ( AUDIT_NLGRP_NONE = 0x0 AUDIT_NLGRP_READLOG = 0x1 ) + +const ( + TUN_F_CSUM = 0x1 + TUN_F_TSO4 = 0x2 + TUN_F_TSO6 = 0x4 + TUN_F_TSO_ECN = 0x8 + TUN_F_UFO = 0x10 +) + +const ( + VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1 + VIRTIO_NET_HDR_F_DATA_VALID = 0x2 + VIRTIO_NET_HDR_F_RSC_INFO = 0x4 +) + +const ( + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_ECN = 0x80 +) diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 89c516a29..4ecc1495c 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -414,7 +414,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 62b4fb269..34fddff96 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -427,7 +427,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index e86b35893..3b14a6031 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -405,7 +405,7 @@ const ( type SockaddrStorage 
struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6c6be4c91..0517651ab 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -406,7 +406,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 4982ea355..3b0c51813 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -407,7 +407,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 173141a67..fccdf4dd0 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 93ae4c516..500de8fc0 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 4e4e510ca..d0434cd2c 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 3f5ba013d..84206ba53 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 71dfe7cdb..ab078cf1f 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -417,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3a2b7f0a6..42eb2c4ce 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index a52d62756..31304a4e8 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -416,7 +416,7 @@ 
const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dfc007d8a..c311f9612 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -434,7 +434,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b53cb9103..bba3cefac 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -429,7 +429,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index fe0aa3547..ad8a01380 100644 --- a/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/src/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -411,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/src/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/vendor/golang.org/x/sys/windows/syscall_windows.go index 41cb3c01f..3723b2c22 100644 --- a/src/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -824,6 +824,9 @@ const socket_error = uintptr(^uint32(0)) //sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup //sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup //sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW +//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW +//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd //sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket //sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto //sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom @@ -1019,8 +1022,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/src/vendor/golang.org/x/sys/windows/types_windows.go b/src/vendor/golang.org/x/sys/windows/types_windows.go index 0c4add974..857acf103 100644 --- a/src/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/vendor/golang.org/x/sys/windows/types_windows.go @@ -1243,6 +1243,51 @@ const ( DnsSectionAdditional = 0x0003 ) +const ( + // flags of WSALookupService + LUP_DEEP = 
0x0001 + LUP_CONTAINERS = 0x0002 + LUP_NOCONTAINERS = 0x0004 + LUP_NEAREST = 0x0008 + LUP_RETURN_NAME = 0x0010 + LUP_RETURN_TYPE = 0x0020 + LUP_RETURN_VERSION = 0x0040 + LUP_RETURN_COMMENT = 0x0080 + LUP_RETURN_ADDR = 0x0100 + LUP_RETURN_BLOB = 0x0200 + LUP_RETURN_ALIASES = 0x0400 + LUP_RETURN_QUERY_STRING = 0x0800 + LUP_RETURN_ALL = 0x0FF0 + LUP_RES_SERVICE = 0x8000 + + LUP_FLUSHCACHE = 0x1000 + LUP_FLUSHPREVIOUS = 0x2000 + + LUP_NON_AUTHORITATIVE = 0x4000 + LUP_SECURE = 0x8000 + LUP_RETURN_PREFERRED_NAMES = 0x10000 + LUP_DNS_ONLY = 0x20000 + + LUP_ADDRCONFIG = 0x100000 + LUP_DUAL_ADDR = 0x200000 + LUP_FILESERVER = 0x400000 + LUP_DISABLE_IDN_ENCODING = 0x00800000 + LUP_API_ANSI = 0x01000000 + + LUP_RESOLUTION_HANDLE = 0x80000000 +) + +const ( + // values of WSAQUERYSET's namespace + NS_ALL = 0 + NS_DNS = 12 + NS_NLA = 15 + NS_BTH = 16 + NS_EMAIL = 37 + NS_PNRPNAME = 38 + NS_PNRPCLOUD = 39 +) + type DNSSRVData struct { Target *uint16 Priority uint16 @@ -3258,3 +3303,43 @@ const ( DWMWA_TEXT_COLOR = 36 DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 ) + +type WSAQUERYSET struct { + Size uint32 + ServiceInstanceName *uint16 + ServiceClassId *GUID + Version *WSAVersion + Comment *uint16 + NameSpace uint32 + NSProviderId *GUID + Context *uint16 + NumberOfProtocols uint32 + AfpProtocols *AFProtocols + QueryString *uint16 + NumberOfCsAddrs uint32 + SaBuffer *CSAddrInfo + OutputFlags uint32 + Blob *BLOB +} + +type WSAVersion struct { + Version uint32 + EnumerationOfComparison int32 +} + +type AFProtocols struct { + AddressFamily int32 + Protocol int32 +} + +type CSAddrInfo struct { + LocalAddr SocketAddress + RemoteAddr SocketAddress + SocketType int32 + Protocol int32 +} + +type BLOB struct { + Size uint32 + BlobData *byte +} diff --git a/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go index ac60052e4..6d2a26853 100644 --- a/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -474,6 +474,9 @@ var ( procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSALookupServiceBeginW = modws2_32.NewProc("WSALookupServiceBeginW") + procWSALookupServiceEnd = modws2_32.NewProc("WSALookupServiceEnd") + procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") procWSARecv = modws2_32.NewProc("WSARecv") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") procWSASend = modws2_32.NewProc("WSASend") @@ -4067,6 +4070,30 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo return } +func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceEnd(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { + r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + 
} + return +} + func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) if r1 == socket_error { diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index eab0e3308..0553959aa 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -223,7 +223,7 @@ golang.org/x/mod/semver ## explicit; go 1.17 golang.org/x/net/http/httpguts golang.org/x/net/idna -# golang.org/x/sys v0.5.0 +# golang.org/x/sys v0.6.0 ## explicit; go 1.17 golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader diff --git a/src/worker/commands.go b/src/worker/commands.go index 36806956d..2d41bdf17 100644 --- a/src/worker/commands.go +++ b/src/worker/commands.go @@ -130,7 +130,7 @@ func upCmd(ctx *cli.Context) error { var pingErr error - for i := 0; i < 300; i++ { + for i := 0; i < 3000; i++ { // check if it has died select { case err := <-died: @@ -172,7 +172,7 @@ func upCmd(ctx *cli.Context) error { return fmt.Errorf("expected PID %v but found %v (port conflict?)", proc.Pid, pid) } - return fmt.Errorf("worker still not reachable after 30 seconds :: %s", pingErr) + return fmt.Errorf("worker still not reachable after 3 minutes :: %s", pingErr) } if err := server.Main(); err != nil { diff --git a/src/worker/helpers.go b/src/worker/helpers.go index be1a7933c..5bb830fb4 100644 --- a/src/worker/helpers.go +++ b/src/worker/helpers.go @@ -195,14 +195,14 @@ func stopOL(olPath string) error { fmt.Printf("According to %s, a worker should already be running (PID %d).\n", pidPath, pid) p, err := os.FindProcess(pid) if err != nil { - return fmt.Errorf("Failed to find worker process with PID %d. May require manual cleanup.\n", pid) + return fmt.Errorf("Failed to find worker process with PID %d. May require manual cleanup.%s\n", pid, pidPath) } fmt.Printf("Send SIGINT and wait for worker to exit cleanly.\n") if err := p.Signal(syscall.SIGINT); err != nil { return fmt.Errorf("Failed to send SIGINT to PID %d (%s). May require manual cleanup.\n", pid, err.Error()) } - for i := 0; i < 600; i++ { + for i := 0; i < 1800; i++ { err := p.Signal(syscall.Signal(0)) if err != nil { fmt.Printf("OL worker process stopped successfully.\n") @@ -211,7 +211,7 @@ func stopOL(olPath string) error { time.Sleep(100 * time.Millisecond) } - return fmt.Errorf("worker didn't stop after 60s") + return fmt.Errorf("worker didn't stop after 180s") } // modify the config.json file based on settings from cmdline: -o opt1=val1,opt2=val2,... 
diff --git a/src/worker/lambda/handlerPuller.go b/src/worker/lambda/handlerPuller.go index 63f37c9bb..812c73cf6 100644 --- a/src/worker/lambda/handlerPuller.go +++ b/src/worker/lambda/handlerPuller.go @@ -1,20 +1,20 @@ package lambda import ( + "archive/tar" + "compress/gzip" "errors" "fmt" + "github.com/open-lambda/open-lambda/ol/common" "io" "io/ioutil" "log" "net/http" "os" - "os/exec" "path/filepath" "regexp" "strings" "sync" - - "github.com/open-lambda/open-lambda/ol/common" ) var notFound404 = errors.New("file does not exist") @@ -113,9 +113,9 @@ func (cp *HandlerPuller) pullLocalFile(src, lambdaName string) (rt_type common.R // expected to be efficient targetDir = cp.dirMaker.Get(lambdaName) - cmd := exec.Command("cp", "-r", src, targetDir) - if output, err := cmd.CombinedOutput(); err != nil { - return rt_type, "", fmt.Errorf("%s :: %s", err, string(output)) + err := copyItem(src, targetDir) + if err != nil { + return rt_type, "", fmt.Errorf("%s", err) } // Figure out runtime type @@ -156,27 +156,27 @@ func (cp *HandlerPuller) pullLocalFile(src, lambdaName string) (rt_type common.R if strings.HasSuffix(stat.Name(), ".py") { log.Printf("Installing `%s` from a python file", src) - cmd := exec.Command("cp", src, filepath.Join(targetDir, "f.py")) - rt_type = common.RT_PYTHON - - if output, err := cmd.CombinedOutput(); err != nil { - return rt_type, "", fmt.Errorf("%s :: %s", err, string(output)) + // cmd := exec.Command("cp", src, filepath.Join(targetDir, "f.py")) + err := copyItem(src, filepath.Join(targetDir, "f.py")) + if err != nil { + return rt_type, "", err } + rt_type = common.RT_PYTHON } else if strings.HasSuffix(stat.Name(), ".bin") { log.Printf("Installing `%s` from binary file", src) - cmd := exec.Command("cp", src, filepath.Join(targetDir, "f.bin")) - rt_type = common.RT_NATIVE - - if output, err := cmd.CombinedOutput(); err != nil { - return rt_type, "", fmt.Errorf("%s :: %s", err, string(output)) + // cmd := exec.Command("cp", src, filepath.Join(targetDir, "f.bin")) + err := copyItem(src, filepath.Join(targetDir, "f.bin")) + if err != nil { + return rt_type, "", err } + rt_type = common.RT_NATIVE } else if strings.HasSuffix(stat.Name(), ".tar.gz") { log.Printf("Installing `%s` from an archive file", src) - cmd := exec.Command("tar", "-xzf", src, "--directory", targetDir) - if output, err := cmd.CombinedOutput(); err != nil { - return rt_type, "", fmt.Errorf("%s :: %s", err, string(output)) + err := decompressTarGz(src, targetDir) + if err != nil { + return rt_type, "", fmt.Errorf("%s", err) } // Figure out runtime type @@ -269,3 +269,106 @@ func (cp *HandlerPuller) getCache(name string) *CacheEntry { func (cp *HandlerPuller) putCache(name, version, path string) { cp.dirCache.Store(name, &CacheEntry{version, path}) } + +// copyItem function will either copy a file or recursively copy a directory +func copyItem(src, dst string) error { + info, err := os.Stat(src) + if err != nil { + return err + } + + if info.IsDir() { + return copyDir(src, dst) + } + return copyFile(src, dst) +} + +func copyFile(src, dst string) error { + sourceFile, err := os.Open(src) + if err != nil { + return err + } + defer sourceFile.Close() + + destFile, err := os.Create(dst) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, sourceFile) + return err +} + +func copyDir(src, dst string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(src, path) + if 
err != nil { + return err + } + + destPath := filepath.Join(dst, relPath) + + if info.IsDir() { + // Create the directory if it doesn't exist + if _, err := os.Stat(destPath); os.IsNotExist(err) { + os.MkdirAll(destPath, info.Mode()) + } + return nil + } + + // Copy the file + return copyFile(path, destPath) + }) +} + +// decompressTarGz is equivalent to `tar -xzf src --directory dst` +func decompressTarGz(src, dst string) error { + r, err := os.Open(src) + if err != nil { + return err + } + defer r.Close() + + gr, err := gzip.NewReader(r) + if err != nil { + return err + } + defer gr.Close() + + tr := tar.NewReader(gr) + + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + target := filepath.Join(dst, header.Name) + switch header.Typeflag { + case tar.TypeDir: + if _, err := os.Stat(target); err != nil { + if err := os.MkdirAll(target, os.FileMode(header.Mode)); err != nil { + return err + } + } + case tar.TypeReg: + f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + if _, err := io.Copy(f, tr); err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/src/worker/lambda/lambdaFunction.go b/src/worker/lambda/lambdaFunction.go index 1defda727..5e9eb4b37 100644 --- a/src/worker/lambda/lambdaFunction.go +++ b/src/worker/lambda/lambdaFunction.go @@ -2,9 +2,13 @@ package lambda import ( "bufio" + "bytes" "container/list" + "encoding/json" "errors" "fmt" + "io" + "io/ioutil" "log" "net/http" "os" @@ -88,6 +92,7 @@ func parseMeta(codeDir string) (meta *sandbox.SandboxMeta, err error) { line := strings.ReplaceAll(scnr.Text(), " ", "") pkg := strings.Split(line, "#")[0] if pkg != "" { + pkg = strings.Split(pkg, ";")[0] // avoid conditional dependencies for now pkg = packages.NormalizePkg(pkg) meta.Installs = append(meta.Installs, pkg) } @@ -236,6 +241,7 @@ func (f *LambdaFunc) Task() { // check for new code, and cleanup old code // (and instances that use it) if necessary + tStartPullHandler := float64(time.Now().UnixNano()) / float64(time.Millisecond) oldCodeDir := f.codeDir if err := f.pullHandlerIfStale(); err != nil { f.printf("Error checking for new lambda code at `%s`: %v", f.codeDir, err) @@ -244,6 +250,17 @@ func (f *LambdaFunc) Task() { req.done <- true continue } + tEndPullHandler := float64(time.Now().UnixNano()) / float64(time.Millisecond) + argsDict := make(map[string]interface{}) + bodyBytes, _ := ioutil.ReadAll(req.r.Body) + json.Unmarshal(bodyBytes, &argsDict) + if argsDict == nil { + argsDict = make(map[string]interface{}) + } + argsDict["start_pullHandler"] = tStartPullHandler + argsDict["end_pullHandler"] = tEndPullHandler + newReqBytes, _ := json.Marshal(argsDict) + req.r.Body = io.NopCloser(bytes.NewBuffer(newReqBytes)) if oldCodeDir != "" && oldCodeDir != f.codeDir { el := f.instances.Front() diff --git a/src/worker/lambda/lambdaInstance.go b/src/worker/lambda/lambdaInstance.go index bd250efb9..ca24ccc8d 100644 --- a/src/worker/lambda/lambdaInstance.go +++ b/src/worker/lambda/lambdaInstance.go @@ -1,10 +1,15 @@ package lambda import ( + "bytes" + "encoding/json" + "fmt" "io" + "io/ioutil" "log" "net/http" "strings" + "time" "github.com/open-lambda/open-lambda/ol/common" "github.com/open-lambda/open-lambda/ol/worker/sandbox" @@ -77,6 +82,8 @@ func (linst *LambdaInstance) Task() { return } + tStartCreate := float64(time.Now().UnixNano()) / float64(time.Millisecond) + t := common.T0("LambdaInstance-WaitSandbox") // if we have a sandbox, try 
unpausing it to see if it is still alive if sb != nil { @@ -96,6 +103,7 @@ func (linst *LambdaInstance) Task() { // if we don't already have a Sandbox, create one, and // HTTP proxy over the channel + miss := 0 if sb == nil { sb = nil @@ -103,14 +111,14 @@ func (linst *LambdaInstance) Task() { scratchDir := f.lmgr.scratchDirs.Make(f.name) // we don't specify parent SB, because ImportCache.Create chooses it for us - sb, err = f.lmgr.ZygoteProvider.Create(f.lmgr.sbPool, true, linst.codeDir, scratchDir, linst.meta, f.rtType) + sb, miss, err = f.lmgr.ZygoteProvider.Create(f.lmgr.sbPool, true, linst.codeDir, scratchDir, linst.meta, f.rtType) if err != nil { f.printf("failed to get Sandbox from import cache") sb = nil } } - log.Printf("Creating new sandbox") + log.Printf("Creating new sandbox, zygote miss=%d", miss) // import cache is either disabled or it failed if sb == nil { @@ -128,6 +136,32 @@ func (linst *LambdaInstance) Task() { } t.T1() + // todo: collect latency + tEndCreate := float64(time.Now().UnixNano()) / float64(time.Millisecond) + argsDict := make(map[string]interface{}) + bodyBytes, _ := ioutil.ReadAll(req.r.Body) + if argsDict == nil { + fmt.Printf("req.r.Body is nil\n") + } + json.Unmarshal(bodyBytes, &argsDict) + if _, ok := argsDict["name"]; !ok { + // name is a unique identifier for each call, specified by the sender + // default is linst.lfunc.name, but cannot trace multiple calls on same lambda + argsDict["name"] = linst.lfunc.name + } + if _, ok := argsDict["req"]; !ok { + argsDict["req"] = 0 + } + argsDict["start_create"] = tStartCreate + argsDict["end_create"] = tEndCreate + argsDict["split_gen"] = sb.(*sandbox.SafeSandbox).Sandbox.(*sandbox.SOCKContainer).Node + argsDict["sb_id"] = sb.(*sandbox.SafeSandbox).Sandbox.(*sandbox.SOCKContainer).ID() + argsDict["zygote_miss"] = miss + tStartCreate, tEndCreate, miss = 0, 0, 0 + + newReqBytes, _ := json.Marshal(argsDict) + req.r.Body = io.NopCloser(bytes.NewBuffer(newReqBytes)) + // below here, we're guaranteed (1) sb != nil, (2) proxy != nil, (3) sb is unpaused // serve until we incoming queue is empty diff --git a/src/worker/lambda/lambdaManager.go b/src/worker/lambda/lambdaManager.go index f2561c0ab..0755b7cac 100644 --- a/src/worker/lambda/lambdaManager.go +++ b/src/worker/lambda/lambdaManager.go @@ -2,6 +2,7 @@ package lambda import ( "container/list" + "fmt" "log" "net/http" "path/filepath" @@ -21,7 +22,7 @@ type LambdaMgr struct { sbPool sandbox.SandboxPool *packages.DepTracer *packages.PackagePuller // depends on sbPool and DepTracer - zygote.ZygoteProvider // depends PackagePuller + zygote.ZygoteProvider // depends PackagePuller *HandlerPuller // depends on sbPool and ImportCache[optional] // storage dirs that we manage @@ -149,6 +150,8 @@ func (mgr *LambdaMgr) DumpStatsToLog() { } log.Printf("Request Profiling (cumulative seconds):") + time(0, "ImportCache.Warmup", "") + time(0, "LambdaFunc.Invoke", "") time(1, "LambdaInstance-WaitSandbox", "LambdaFunc.Invoke") @@ -158,6 +161,7 @@ func (mgr *LambdaMgr) DumpStatsToLog() { time(3, "ImportCache.root.Lookup", "ImportCache.Create") time(3, "ImportCache.createChildSandboxFromNode", "ImportCache.Create") time(4, "ImportCache.getSandboxInNode", "ImportCache.createChildSandboxFromNode") + time(5, "ImportCache.getSandboxInNode:Lock", "ImportCache.getSandboxInNode") time(4, "ImportCache.createChildSandboxFromNode:childSandboxPool.Create", "ImportCache.createChildSandboxFromNode") time(4, "ImportCache.putSandboxInNode", "ImportCache.createChildSandboxFromNode") @@ -165,6 
+169,14 @@ func (mgr *LambdaMgr) DumpStatsToLog() { time(5, "ImportCache.putSandboxInNode:Pause", "ImportCache.putSandboxInNode") time(1, "LambdaInstance-ServeRequests", "LambdaFunc.Invoke") time(2, "LambdaInstance-RoundTrip", "LambdaInstance-ServeRequests") + time(0, "Unpause()", "") + time(0, "Pause()", "") + time(0, "Create()", "") + log.Printf("eviction dict %v, evict zygote number %d\n", sandbox.EvictDict, sandbox.EvictZygoteCnt) + + for k, v := range sandbox.EvictDict { + fmt.Printf("evict %d for %d times, create %d times\n", k, v, zygote.CreateCount[k]) + } } func (mgr *LambdaMgr) Cleanup() { diff --git a/src/worker/lambda/packages/packagePuller.go b/src/worker/lambda/packages/packagePuller.go index b787bf848..03f1bf418 100644 --- a/src/worker/lambda/packages/packagePuller.go +++ b/src/worker/lambda/packages/packagePuller.go @@ -40,10 +40,15 @@ type Package struct { // the pip-install admin lambda returns this type PackageMeta struct { - Deps []string `json:"Deps"` + Deps []string `json:"Deps"` // deprecated TopLevel []string `json:"TopLevel"` } +type ModuleInfo struct { + Name string + IsPkg bool +} + func NewPackagePuller(sbPool sandbox.SandboxPool, depTracer *DepTracer) (*PackagePuller, error) { // create a lambda function for installing pip packages. We do // each install in a Sandbox for two reasons: @@ -77,47 +82,6 @@ func NormalizePkg(pkg string) string { return strings.ReplaceAll(strings.ToLower(pkg), "_", "-") } -// "pip install" missing packages to Conf.Pkgs_dir -func (pp *PackagePuller) InstallRecursive(installs []string) ([]string, error) { - // shrink capacity to length so that our appends are not - // visible to caller - installs = installs[:len(installs):len(installs)] - - installSet := make(map[string]bool) - for _, install := range installs { - name := strings.Split(install, "==")[0] - installSet[name] = true - } - - // Installs may grow as we loop, because some installs have - // deps, leading to other installs - for i := 0; i < len(installs); i++ { - pkg := installs[i] - if common.Conf.Trace.Package { - log.Printf("On %v of %v", pkg, installs) - } - p, err := pp.GetPkg(pkg) - if err != nil { - return nil, err - } - - if common.Conf.Trace.Package { - log.Printf("Package '%s' has deps %v", pkg, p.Meta.Deps) - log.Printf("Package '%s' has top-level modules %v", pkg, p.Meta.TopLevel) - } - - // push any previously unseen deps on the list of ones to install - for _, dep := range p.Meta.Deps { - if !installSet[dep] { - installs = append(installs, dep) - installSet[dep] = true - } - } - } - - return installs, nil -} - // GetPkg does the pip install in a Sandbox, taking care to never install the // same Sandbox more than once. 
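One caveat on the decompressTarGz helper introduced in handlerPuller.go above: it joins header.Name onto the destination without validation, so a crafted archive containing "../" entries could write outside the target directory (the classic zip-slip problem). A minimal guard it could call before extracting each entry, sketched under the assumption that rejecting such archives is acceptable; safeJoin is an illustrative name, not part of this change:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin resolves a tar entry name under dst and rejects any name
// that escapes dst, e.g. "../../etc/passwd".
func safeJoin(dst, name string) (string, error) {
	target := filepath.Join(dst, name)
	if target != filepath.Clean(dst) && !strings.HasPrefix(target, filepath.Clean(dst)+string(filepath.Separator)) {
		return "", fmt.Errorf("tar entry %q escapes %q", name, dst)
	}
	return target, nil
}

func main() {
	fmt.Println(safeJoin("/tmp/out", "handler/f.py"))  // accepted
	fmt.Println(safeJoin("/tmp/out", "../escape.txt")) // rejected
}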
// @@ -169,6 +133,7 @@ func (pp *PackagePuller) sandboxInstall(p *Package) (err error) { // assume dir existence means it is installed already log.Printf("%s appears already installed from previous run of OL", p.Name) alreadyInstalled = true + return nil } else { log.Printf("run pip install %s from a new Sandbox to %s on host", p.Name, scratchDir) if err := os.Mkdir(scratchDir, 0700); err != nil { @@ -219,9 +184,29 @@ return err } - for i, pkg := range p.Meta.Deps { - p.Meta.Deps[i] = NormalizePkg(pkg) + return nil +} + +// IterModules is a simplified implementation of pkgutil.iter_modules +// todo: implement every detail of pkgutil.iter_modules, or find an efficient way to call pkgutil.iter_modules from python +func IterModules(path string) ([]ModuleInfo, error) { + var modules []ModuleInfo + + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err } - return nil + for _, file := range files { + if file.IsDir() { + // Check if the directory contains an __init__.py file, which would make it a package. + if _, err := os.Stat(filepath.Join(path, file.Name(), "__init__.py")); !os.IsNotExist(err) { + modules = append(modules, ModuleInfo{Name: file.Name(), IsPkg: true}) + } + } else if strings.HasSuffix(file.Name(), ".py") && file.Name() != "__init__.py" { + modName := strings.TrimSuffix(file.Name(), ".py") + modules = append(modules, ModuleInfo{Name: modName, IsPkg: false}) + } + } + return modules, nil } diff --git a/src/worker/lambda/zygote/api.go b/src/worker/lambda/zygote/api.go index 1dcc14c5d..9b9aeee08 100644 --- a/src/worker/lambda/zygote/api.go +++ b/src/worker/lambda/zygote/api.go @@ -8,6 +8,7 @@ import ( type ZygoteProvider interface { Create(childSandboxPool sandbox.SandboxPool, isLeaf bool, codeDir, scratchDir string, meta *sandbox.SandboxMeta, - rt_type common.RuntimeType) (sandbox.Sandbox, error) + rt_type common.RuntimeType) (sandbox.Sandbox, int, error) + Warmup() error Cleanup() } diff --git a/src/worker/lambda/zygote/importCache.go b/src/worker/lambda/zygote/importCache.go index 49068129e..b1bc41a5b 100755 --- a/src/worker/lambda/zygote/importCache.go +++ b/src/worker/lambda/zygote/importCache.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "log" + "path/filepath" "strings" "sync" "sync/atomic" @@ -29,8 +30,9 @@ type ImportCache struct { // Sandbox death, etc) type ImportCacheNode struct { // from config file: - Packages []string `json:"packages"` - Children []*ImportCacheNode `json:"children"` + Packages []string `json:"packages"` + Children []*ImportCacheNode `json:"children"` + SplitGeneration int `json:"split_generation"` // backpointers based on Children structure parent *ImportCacheNode @@ -60,10 +62,6 @@ type ImportCacheNode struct { meta *sandbox.SandboxMeta } -type ZygoteReq struct { - parent chan sandbox.Sandbox -} - func NewImportCache(codeDirs *common.DirMaker, scratchDirs *common.DirMaker, sbPool sandbox.SandboxPool, pp *packages.PackagePuller) (ic *ImportCache, err error) { cache := &ImportCache{ codeDirs: codeDirs, @@ -144,7 +142,8 @@ func (cache *ImportCache) recursiveKill(node *ImportCacheNode) { } // (1) find Zygote and (2) use it to try creating a new Sandbox -func (cache *ImportCache) Create(childSandboxPool sandbox.SandboxPool, isLeaf bool, codeDir, scratchDir string, meta *sandbox.SandboxMeta, rt_type common.RuntimeType) (sandbox.Sandbox, error) { +func (cache *ImportCache) Create(childSandboxPool sandbox.SandboxPool, isLeaf bool, codeDir, scratchDir string, + meta 
*sandbox.SandboxMeta, rt_type common.RuntimeType) (sandbox.Sandbox, int, error) { t := common.T0("ImportCache.Create") defer t.T1() @@ -165,17 +164,32 @@ func (cache *ImportCache) Create(childSandboxPool sandbox.SandboxPool, isLeaf bo // the new Sandbox may either be for a Zygote, or a leaf Sandbox func (cache *ImportCache) createChildSandboxFromNode( childSandboxPool sandbox.SandboxPool, node *ImportCacheNode, isLeaf bool, - codeDir, scratchDir string, meta *sandbox.SandboxMeta, rt_type common.RuntimeType) (sandbox.Sandbox, error) { + codeDir, scratchDir string, meta *sandbox.SandboxMeta, rt_type common.RuntimeType) (sandbox.Sandbox, int, error) { t := common.T0("ImportCache.createChildSandboxFromNode") defer t.T1() + if !common.Conf.Features.COW { + if isLeaf { + sb, err := childSandboxPool.Create(node.sb, isLeaf, codeDir, scratchDir, meta, rt_type) + return sb, 0, err + } else { + if node.sb != nil { + return node.sb, 0, nil + } + sb, err := childSandboxPool.Create(nil, false, codeDir, scratchDir, meta, rt_type) + return sb, 0, err + } + } // try twice, restarting parent Sandbox if it fails the first time forceNew := false for i := 0; i < 2; i++ { - zygoteSB, isNew, err := cache.getSandboxInNode(node, forceNew, rt_type) + if forceNew { + fmt.Printf("forceNew is true\n") + } + zygoteSB, isNew, miss, err := cache.getSandboxInNode(node, forceNew, rt_type, common.Conf.Features.COW) if err != nil { - return nil, err + return nil, 0, err } t2 := common.T0("ImportCache.createChildSandboxFromNode:childSandboxPool.Create") @@ -193,9 +207,13 @@ func (cache *ImportCache) createChildSandboxFromNode( // dec ref count cache.putSandboxInNode(node, zygoteSB) + if isLeaf && sb != nil { + sb.(*sandbox.SafeSandbox).Sandbox.(*sandbox.SOCKContainer).Node = node.SplitGeneration + } + // isNew is guaranteed to be true on 2nd iteration if err != sandbox.FORK_FAILED || isNew { - return sb, err + return sb, miss, err } forceNew = true @@ -211,11 +229,14 @@ func (cache *ImportCache) createChildSandboxFromNode( // // the Sandbox returned is guaranteed to be in Unpaused state. 
After // use, caller must also call putSandboxInNode to release ref count -func (cache *ImportCache) getSandboxInNode(node *ImportCacheNode, forceNew bool, rt_type common.RuntimeType) (sb sandbox.Sandbox, isNew bool, err error) { +func (cache *ImportCache) getSandboxInNode(node *ImportCacheNode, forceNew bool, rt_type common.RuntimeType, cow bool, +) (sb sandbox.Sandbox, isNew bool, miss int, err error) { t := common.T0("ImportCache.getSandboxInNode") defer t.T1() + t1 := common.T0("ImportCache.getSandboxInNode:Lock") node.mutex.Lock() + t1.T1() defer node.mutex.Unlock() // destroy any old Sandbox first if we're required to do so @@ -230,18 +251,23 @@ func (cache *ImportCache) getSandboxInNode(node *ImportCacheNode, forceNew bool, if node.sbRefCount == 0 { if err := node.sb.Unpause(); err != nil { node.sb = nil - return nil, false, err + return nil, false, 0, err } } node.sbRefCount += 1 - return node.sb, false, nil + fmt.Printf("node.sb != nil: node %d, getSandboxInNode with ref count %d\n", node.SplitGeneration, node.sbRefCount) + return node.sb, false, 0, nil } else { - // SLOW PATH - if err := cache.createSandboxInNode(node, rt_type); err != nil { - return nil, false, err + // SLOW PATH, miss >= 1 + if miss, err = cache.createSandboxInNode(node, rt_type, cow); err != nil { + fmt.Printf("getSandboxInNode error: %s \n", err.Error()) + if node.parent != nil { + fmt.Printf("node %d, parent %d\n", err.Error(), node.SplitGeneration, node.parent.SplitGeneration) + } + return nil, false, 0, err } node.sbRefCount = 1 - return node.sb, true, nil + return node.sb, true, miss, nil } } @@ -266,7 +292,6 @@ func (*ImportCache) putSandboxInNode(node *ImportCacheNode, sb sandbox.Sandbox) } node.sbRefCount -= 1 - if node.sbRefCount == 0 { t2 := common.T0("ImportCache.putSandboxInNode:Pause") if err := node.sb.Pause(); err != nil { @@ -280,24 +305,68 @@ func (*ImportCache) putSandboxInNode(node *ImportCacheNode, sb sandbox.Sandbox) } } -func (cache *ImportCache) createSandboxInNode(node *ImportCacheNode, rt_type common.RuntimeType) (err error) { +var countMapLock sync.Mutex +var CreateCount = make(map[int]int) + +func appendUnique(original []string, elementsToAdd []string) []string { + exists := make(map[string]bool) + for _, item := range original { + exists[item] = true + } + + for _, item := range elementsToAdd { + if !exists[item] { + original = append(original, item) + exists[item] = true + } + } + + return original +} + +// inherit the meta for all the ancestors +func inheritMeta(node *ImportCacheNode) (meta *sandbox.SandboxMeta) { + tmpNode := node.parent + meta = node.meta + for tmpNode.SplitGeneration != 0 { + if tmpNode.meta != nil { + // merge meta + meta.Imports = appendUnique(meta.Imports, tmpNode.meta.Imports) + } + tmpNode = tmpNode.parent + } + return meta +} + +func (cache *ImportCache) createSandboxInNode(node *ImportCacheNode, rt_type common.RuntimeType, cow bool) (miss int, err error) { // populate codeDir/packages with deps, and record top-level mods) if node.codeDir == "" { codeDir := cache.codeDirs.Make("import-cache") // TODO: clean this up upon failure - - installs, err := cache.pkgPuller.InstallRecursive(node.Packages) - if err != nil { - return err + // todo: only thing is capture top-level mods, no need to open another sandbox + // if all pkgs required by lambda are guaranteed to be installed, then no need to call getPkg(), + // but sometimes a zygote is created without requests, e.g. 
warm up the tree, then getPkg() is needed + installs := []string{} + for _, name := range node.AllPackages() { + _, err := cache.pkgPuller.GetPkg(name) + if err != nil { + return 0, fmt.Errorf("ImportCache.go: could not get package %s: %v", name, err) + } + installs = append(installs, name) } topLevelMods := []string{} for _, name := range node.Packages { - pkg, err := cache.pkgPuller.GetPkg(name) + pkgPath := filepath.Join(common.Conf.SOCK_base_path, "packages", name, "files") + moduleInfos, err := packages.IterModules(pkgPath) if err != nil { - return err + return 0, err + } + modulesNames := []string{} + for _, moduleInfo := range moduleInfos { + modulesNames = append(modulesNames, moduleInfo.Name) } - topLevelMods = append(topLevelMods, pkg.Meta.TopLevel...) + topLevelMods = append(topLevelMods, modulesNames...) } node.codeDir = codeDir @@ -305,24 +374,119 @@ func (cache *ImportCache) createSandboxInNode(node *ImportCacheNode, rt_type com // policy: what modules should we pre-import? Top-level of // pre-initialized packages is just one possibility... node.meta = &sandbox.SandboxMeta{ - Installs: installs, - Imports: topLevelMods, + Installs: installs, + Imports: topLevelMods, + SplitGeneration: node.SplitGeneration, } } scratchDir := cache.scratchDirs.Make("import-cache") var sb sandbox.Sandbox + miss = 0 if node.parent != nil { - sb, err = cache.createChildSandboxFromNode(cache.sbPool, node.parent, false, node.codeDir, scratchDir, node.meta, rt_type) + if cow { + sb, miss, err = cache.createChildSandboxFromNode(cache.sbPool, node.parent, false, node.codeDir, scratchDir, node.meta, rt_type) + } else { + node.meta = inheritMeta(node) + // create a new sandbox without parent + sb, err = cache.sbPool.Create(nil, false, node.codeDir, scratchDir, node.meta, common.RT_PYTHON) + } } else { sb, err = cache.sbPool.Create(nil, false, node.codeDir, scratchDir, node.meta, common.RT_PYTHON) } if err != nil { - return err + return 0, err } node.sb = sb + + countMapLock.Lock() + CreateCount[node.SplitGeneration] += 1 + countMapLock.Unlock() + + return miss + 1, nil +} + +// Warmup will initialize every node in the tree, +// to have an accurate memory usage result and prevent warmup from failing, please have a large enough memory to avoid evicting +func (cache *ImportCache) Warmup() error { + COW := common.Conf.Features.COW + rt_type := common.RT_PYTHON + + warmupPy := "pass" + // find all the leaf zygotes in the tree + warmupZygotes := []*ImportCacheNode{} + // do a BFS to find all the leaf zygote + tmpNodes := []*ImportCacheNode{cache.root} + + // when COW is enabled, only create leaf zygotes(so that its parent will also be created) + // when COW is disabled, create all zygotes + for len(tmpNodes) > 0 { + node := tmpNodes[0] + tmpNodes = tmpNodes[1:] + if !COW || len(node.Children) == 0 { + warmupZygotes = append(warmupZygotes, node) + } + if len(node.Children) != 0 { + tmpNodes = append(tmpNodes, node.Children...) 
+ } + } + + errChan := make(chan error, len(warmupZygotes)) + var wg sync.WaitGroup + + goroutinePool := make(chan struct{}, 6) + + for i, node := range warmupZygotes { + wg.Add(1) + goroutinePool <- struct{}{} + + go func(i int, node *ImportCacheNode) { + defer wg.Done() + for _, pkg := range node.Packages { + if _, err := cache.pkgPuller.GetPkg(pkg); err != nil { + errChan <- fmt.Errorf("warmup: could not get package %s: %v", pkg, err) + return + } + } + + zygoteSB, _, _, err := cache.getSandboxInNode(node, false, rt_type, COW) + // if a created zygote is evicted in warmup, then node.sbRefCount will be 0 + if node.sbRefCount == 0 { + fmt.Printf("warning: node %d has a refcnt %d<0, meaning it's destroyed\n", node.SplitGeneration, node.sbRefCount) + } + codeDir := cache.codeDirs.Make("warmup") + // write warmyp_py to codeDir + codePath := filepath.Join(codeDir, "f.py") + ioutil.WriteFile(codePath, []byte(warmupPy), 0777) + scratchDir := cache.scratchDirs.Make("warmup") + sb, err := cache.sbPool.Create(zygoteSB, true, codeDir, scratchDir, nil, rt_type) + if err != nil { + errChan <- fmt.Errorf("failed to warm up zygote tree, reason is %s", err.Error()) + return + } + sb.Destroy("ensure modules are imported in ZygoteSB by launching a fork") + atomic.AddInt64(&node.createNonleafChild, 1) + cache.putSandboxInNode(node, zygoteSB) + if err != nil { + errChan <- fmt.Errorf("failed to warm up zygote tree, reason is %s", err.Error()) + } else { + errChan <- nil + } + <-goroutinePool + }(i, node) + } + + wg.Wait() + close(errChan) + + for err := range errChan { + if err != nil { + return err + } + } + return nil } diff --git a/src/worker/lambda/zygote/multiTree.go b/src/worker/lambda/zygote/multiTree.go index 036fc3f97..daea91bf5 100644 --- a/src/worker/lambda/zygote/multiTree.go +++ b/src/worker/lambda/zygote/multiTree.go @@ -14,6 +14,11 @@ type MultiTree struct { trees []*ImportCache } +func (mt *MultiTree) Warmup() error { + //TODO implement warm up + panic("multi-tree warmup not implemented") +} + func NewMultiTree(codeDirs *common.DirMaker, scratchDirs *common.DirMaker, sbPool sandbox.SandboxPool, pp *packages.PackagePuller) (*MultiTree, error) { var tree_count int switch cpus := runtime.NumCPU(); { @@ -30,7 +35,7 @@ func NewMultiTree(codeDirs *common.DirMaker, scratchDirs *common.DirMaker, sbPoo for i := range trees { tree, err := NewImportCache(codeDirs, scratchDirs, sbPool, pp) if err != nil { - for j := 0; j < i; j ++ { + for j := 0; j < i; j++ { trees[j].Cleanup() } return nil, err @@ -40,9 +45,10 @@ func NewMultiTree(codeDirs *common.DirMaker, scratchDirs *common.DirMaker, sbPoo return &MultiTree{trees: trees}, nil } -func (mt *MultiTree) Create(childSandboxPool sandbox.SandboxPool, isLeaf bool, codeDir, scratchDir string, meta *sandbox.SandboxMeta, rt_type common.RuntimeType) (sandbox.Sandbox, error) { +func (mt *MultiTree) Create(childSandboxPool sandbox.SandboxPool, isLeaf bool, codeDir, scratchDir string, meta *sandbox.SandboxMeta, rt_type common.RuntimeType) (sandbox.Sandbox, int, error) { idx := rand.Intn(len(mt.trees)) - return mt.trees[idx].Create(childSandboxPool, isLeaf, codeDir, scratchDir, meta, rt_type) + sb, miss, err := mt.trees[idx].Create(childSandboxPool, isLeaf, codeDir, scratchDir, meta, rt_type) + return sb, miss, err } func (mt *MultiTree) Cleanup() { diff --git a/src/worker/sandbox/api.go b/src/worker/sandbox/api.go index c587e61be..edef39390 100644 --- a/src/worker/sandbox/api.go +++ b/src/worker/sandbox/api.go @@ -60,7 +60,7 @@ type Sandbox interface { Unpause() 
error // Communication channel to forward requests. - Client() (*http.Client) + Client() *http.Client // Lookup metadata that Sandbox was initialized with (static over time) Meta() *SandboxMeta @@ -89,6 +89,8 @@ type SandboxMeta struct { Imports []string MemLimitMB int CPUPercent int + + SplitGeneration int } type SandboxError string @@ -115,13 +117,13 @@ type SandboxEventFunc func(SandboxEventType, Sandbox) type SandboxEventType int const ( - EvCreate SandboxEventType = iota - EvDestroy = iota - EvDestroyIgnored = iota - EvPause = iota - EvUnpause = iota - EvFork = iota - EvChildExit = iota + EvCreate SandboxEventType = iota + EvDestroy = iota + EvDestroyIgnored = iota + EvPause = iota + EvUnpause = iota + EvFork = iota + EvChildExit = iota ) type SandboxEvent struct { diff --git a/src/worker/sandbox/cgroups/cgroup.go b/src/worker/sandbox/cgroups/cgroup.go index 7859ed52b..ca41d27a9 100644 --- a/src/worker/sandbox/cgroups/cgroup.go +++ b/src/worker/sandbox/cgroups/cgroup.go @@ -186,8 +186,9 @@ func (cg *CgroupImpl) AddPid(pid string) error { func (cg *CgroupImpl) setFreezeState(state int64) error { cg.WriteInt("cgroup.freeze", state) - timeout := 5 * time.Second - + timeout := 30 * time.Second + sleepDur := 1 * time.Millisecond + time.Sleep(sleepDur) start := time.Now() for { freezerState, err := cg.TryReadInt("cgroup.freeze") @@ -202,8 +203,8 @@ func (cg *CgroupImpl) setFreezeState(state int64) error { if time.Since(start) > timeout { return fmt.Errorf("cgroup stuck on %v after %v (should be %v)", freezerState, timeout, state) } - - time.Sleep(1 * time.Millisecond) + time.Sleep(sleepDur) + sleepDur += 2 } } diff --git a/src/worker/sandbox/evictors.go b/src/worker/sandbox/evictors.go index f5a1f0da4..44d57bf57 100644 --- a/src/worker/sandbox/evictors.go +++ b/src/worker/sandbox/evictors.go @@ -140,7 +140,7 @@ func (evictor *SOCKEvictor) updateState() { prio += 1 case EvPause: prio -= 1 - case EvFork: + case EvFork: // TODO: add a huge value to avoid eviction for zygote prio += 2 case EvChildExit: prio -= 2 @@ -172,12 +172,19 @@ func (evictor *SOCKEvictor) updateState() { } } +var EvictZygoteCnt = 0 +var EvictDict = make(map[int]int) + // evict whatever SB is at the front of the queue, assumes // queue is not empty func (evictor *SOCKEvictor) evictFront(queue *list.List, force bool) { front := queue.Front() sb := front.Value.(Sandbox) + if strings.Contains(sb.(*SafeSandbox).Sandbox.(*SOCKContainer).scratchDir, "import-cache") { + EvictZygoteCnt++ + EvictDict[sb.(*SafeSandbox).Sandbox.(*SOCKContainer).meta.SplitGeneration]++ + } evictor.printf("Evict Sandbox %v", sb.ID()) evictor.move(sb, evictor.evicting) @@ -232,7 +239,7 @@ func (evictor *SOCKEvictor) doEvictions() { if freeSandboxes <= 0 && evictor.evicting.Len() == 0 { evictor.printf("WARNING! Critically low on memory, so evicting an active Sandbox") if evictor.prioQueues[1].Len() > 0 { - evictor.evictFront(evictor.prioQueues[1], true) + //evictor.evictFront(evictor.prioQueues[1], true) } } diff --git a/src/worker/sandbox/safeSandbox.go b/src/worker/sandbox/safeSandbox.go index 9ab2ef522..3e8163d8a 100644 --- a/src/worker/sandbox/safeSandbox.go +++ b/src/worker/sandbox/safeSandbox.go @@ -17,7 +17,7 @@ import ( "github.com/open-lambda/open-lambda/ol/common" ) -type safeSandbox struct { +type SafeSandbox struct { Sandbox sync.Mutex @@ -30,20 +30,20 @@ type safeSandbox struct { // init is complete. // // the rational is that we might need to do some setup (e.g., forking) -// after a safeSandbox is created, and that setup may fail. 
We never +// after a SafeSandbox is created, and that setup may fail. We never // want to notify listeners about a Sandbox that isn't ready to go. // E.g., would be problematic if an evictor (which is listening) were // to try to evict concurrently with us creating processes in the // Sandbox as part of setup. -func newSafeSandbox(innerSB Sandbox) *safeSandbox { - sb := &safeSandbox{ +func newSafeSandbox(innerSB Sandbox) *SafeSandbox { + sb := &SafeSandbox{ Sandbox: innerSB, } return sb } -func (sb *safeSandbox) startNotifyingListeners(eventHandlers []SandboxEventFunc) { +func (sb *SafeSandbox) startNotifyingListeners(eventHandlers []SandboxEventFunc) { sb.Mutex.Lock() defer sb.Mutex.Unlock() sb.eventHandlers = eventHandlers @@ -51,20 +51,20 @@ func (sb *safeSandbox) startNotifyingListeners(eventHandlers []SandboxEventFunc) } // like regular printf, with suffix indicating which sandbox produced the message -func (sb *safeSandbox) printf(format string, args ...interface{}) { +func (sb *SafeSandbox) printf(format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) log.Printf("%s [SB %s]", strings.TrimRight(msg, "\n"), sb.Sandbox.ID()) } // propogate event to anybody who signed up to listen (e.g., an evictor) -func (sb *safeSandbox) event(evType SandboxEventType) { +func (sb *SafeSandbox) event(evType SandboxEventType) { for _, handler := range sb.eventHandlers { handler(evType, sb) } } // assumes lock is already held -func (sb *safeSandbox) destroyOnErr(funcName string, origErr error) { +func (sb *SafeSandbox) destroyOnErr(funcName string, origErr error) { if origErr != nil { sb.printf("Destroy() due to %v", origErr) sb.Sandbox.Destroy(fmt.Sprintf("%s returned %s", funcName, origErr)) @@ -75,7 +75,7 @@ func (sb *safeSandbox) destroyOnErr(funcName string, origErr error) { } } -func (sb *safeSandbox) Destroy(reason string) { +func (sb *SafeSandbox) Destroy(reason string) { sb.printf("Destroy()") t := common.T0("Destroy()") defer t.T1() @@ -97,7 +97,7 @@ func (sb *safeSandbox) Destroy(reason string) { sb.event(EvDestroy) } -func (sb *safeSandbox) DestroyIfPaused(reason string) { +func (sb *SafeSandbox) DestroyIfPaused(reason string) { sb.printf("DestroyIfPaused()") t := common.T0("DestroyIfPaused()") defer t.T1() @@ -117,7 +117,7 @@ func (sb *safeSandbox) DestroyIfPaused(reason string) { } } -func (sb *safeSandbox) Pause() (err error) { +func (sb *SafeSandbox) Pause() (err error) { sb.printf("Pause()") t := common.T0("Pause()") defer t.T1() @@ -140,7 +140,7 @@ func (sb *safeSandbox) Pause() (err error) { return nil } -func (sb *safeSandbox) Unpause() (err error) { +func (sb *SafeSandbox) Unpause() (err error) { sb.printf("Unpause()") t := common.T0("Unpause()") defer t.T1() @@ -167,7 +167,7 @@ func (sb *safeSandbox) Unpause() (err error) { return nil } -func (sb *safeSandbox) Client() (*http.Client) { +func (sb *SafeSandbox) Client() *http.Client { // According to the docs, "Clients and Transports are safe for // concurrent use by multiple goroutines and for efficiency // should only be created once and re-used." 
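The safeSandbox -> SafeSandbox rename above (with the embedded Sandbox field now exported) is what allows code outside the sandbox package to unwrap down to the SOCKContainer, as the new lambdaInstance.go and evictors.go code does with chained assertions like sb.(*sandbox.SafeSandbox).Sandbox.(*sandbox.SOCKContainer). A sketch of a helper that would centralize that pattern; sockOf is hypothetical and assumes the pool always wraps a SOCKContainer in a SafeSandbox, which holds for the current SOCKPool:

package main

import (
	"fmt"

	"github.com/open-lambda/open-lambda/ol/worker/sandbox"
)

// sockOf unwraps a pool-created Sandbox to its SOCKContainer, returning
// false (instead of panicking) if the wrapper chain does not hold.
func sockOf(sb sandbox.Sandbox) (*sandbox.SOCKContainer, bool) {
	safe, ok := sb.(*sandbox.SafeSandbox)
	if !ok {
		return nil, false
	}
	c, ok := safe.Sandbox.(*sandbox.SOCKContainer)
	return c, ok
}

func main() {
	var sb sandbox.Sandbox // e.g. returned by SOCKPool.Create
	if c, ok := sockOf(sb); ok {
		fmt.Println("split generation:", c.Node)
	}
}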
@@ -180,7 +180,7 @@ func (sb *safeSandbox) Client() (*http.Client) { } // fork (as a private method) doesn't cleanup parent sb if fork fails -func (sb *safeSandbox) fork(dst Sandbox) (err error) { +func (sb *SafeSandbox) fork(dst Sandbox) (err error) { sb.printf("fork(SB %v)", dst.ID()) t := common.T0("fork()") defer t.T1() @@ -199,7 +199,7 @@ func (sb *safeSandbox) fork(dst Sandbox) (err error) { return nil } -func (sb *safeSandbox) childExit(child Sandbox) { +func (sb *SafeSandbox) childExit(child Sandbox) { sb.printf("childExit(SB %v)", child.ID()) t := common.T0("childExit()") defer t.T1() @@ -218,7 +218,7 @@ func (sb *safeSandbox) childExit(child Sandbox) { } } -func (sb *safeSandbox) DebugString() string { +func (sb *SafeSandbox) DebugString() string { sb.Mutex.Lock() defer sb.Mutex.Unlock() diff --git a/src/worker/sandbox/sock.go b/src/worker/sandbox/sock.go index 52554e47f..6cf16958c 100644 --- a/src/worker/sandbox/sock.go +++ b/src/worker/sandbox/sock.go @@ -4,13 +4,13 @@ import ( "fmt" "io/ioutil" "log" - "sync/atomic" "net/http" "os" "os/exec" "path/filepath" "strconv" "strings" + "sync/atomic" "syscall" "time" @@ -27,8 +27,10 @@ type SOCKContainer struct { scratchDir string cg cgroups.Cgroup rtType common.RuntimeType - client *http.Client + client *http.Client + Node int + IsZygote bool // 1 for self, plus 1 for each child (we can't release memory // until all descendants are dead, because they share the // pages of this Container, but this is the only container @@ -70,10 +72,13 @@ func (container *SOCKContainer) freshProc() (err error) { var cmd *exec.Cmd + cgPath := "/default-ol-sandboxes/" + container.cg.Name() + // todo: make this a Cgroup method. if container.rtType == common.RT_PYTHON { cmd = exec.Command( + "sudo", "cgexec", "-g", "memory,cpu:"+cgPath, "chroot", container.containerRootDir, "python3", "-u", - "/runtimes/python/server.py", "/host/bootstrap.py", strconv.Itoa(1), + "/runtimes/python/server.py", "/host/bootstrap.py", strconv.Itoa(1), strconv.FormatBool(common.Conf.Features.Enable_seccomp), ) } else if container.rtType == common.RT_NATIVE { @@ -89,7 +94,6 @@ "chroot", container.containerRootDir, "env", "RUST_BACKTRACE=full", "/runtimes/native/server", strconv.Itoa(1), strconv.FormatBool(common.Conf.Features.Enable_seccomp), - ) } else { return fmt.Errorf("Unsupported runtime") @@ -192,6 +196,36 @@ func (container *SOCKContainer) populateRoot() (err error) { return fmt.Errorf("failed to make root dir private :: %v", err) } + // todo: the packages dir is now read-only; is it necessary to remount it using overlayfs? + // todo: also, is it necessary to create the illusion of a common site-packages dir? 
+ // create a dir used to hide the content of the packages dir + //tmpEmptyDir, err := os.MkdirTemp("", "empty") + //if err != nil { + // log.Fatal(err) + //} + //if err := syscall.Mount(tmpEmptyDir, filepath.Join(container.containerRootDir, "packages"), "", common.BIND, ""); err != nil { + // return fmt.Errorf("failed to bind empty dir: %v", err) + //} + // + //for _, pkg := range container.meta.Installs { + // srcDirStr := filepath.Join(common.Conf.SOCK_base_path, "packages", pkg, "files") + // targetDirStr := filepath.Join(container.containerRootDir, "packages", pkg, "files") + // err := os.MkdirAll(targetDirStr, 0777) + // if err != nil { + // return err + // } + // + // if err := syscall.Mount(srcDirStr, targetDirStr, "", common.BIND, ""); err != nil { + // return fmt.Errorf("failed to bind package dir: %s -> %s :: %v", srcDirStr, targetDirStr, err) + // } + // if err := syscall.Mount("none", targetDirStr, "", common.BIND_RO, ""); err != nil { + // return fmt.Errorf("failed to bind package dir RO: %s :: %v", targetDirStr, err) + // } + // if err := syscall.Mount("none", targetDirStr, "", common.PRIVATE, ""); err != nil { + // return fmt.Errorf("failed to make package dir private :: %v", err) + // } + //} + // FILE SYSTEM STEP 2: code dir if container.codeDir != "" { sbCodeDir := filepath.Join(container.containerRootDir, "handler") @@ -336,20 +370,15 @@ func (container *SOCKContainer) fork(dst Sandbox) (err error) { return fmt.Errorf("only %vMB of spare memory in parent, rejecting fork request (need at least 3MB)", spareMB) } - // increment reference count before we start any processes + // increment reference count before we start any processes container.children[dst.ID()] = dst - newCount := atomic.AddInt32(&container.cgRefCount, 1) + newCount := atomic.AddInt32(&container.cgRefCount, 1) if newCount == 0 { panic("cgRefCount was already 0") } - dstSock := dst.(*safeSandbox).Sandbox.(*SOCKContainer) - - origPids, err := container.cg.GetPIDs() - if err != nil { - return err - } + dstSock := dst.(*SafeSandbox).Sandbox.(*SOCKContainer) root, err := os.Open(dstSock.containerRootDir) if err != nil { @@ -371,44 +400,6 @@ } t.T1() - // move new PIDs to new cgroup. - // - // Make multiple passes in case new processes are being - // spawned (TODO: better way to do this? This lets a forking - // process potentially kill our cache entry, which isn't - // great). 
@@ -416,7 +407,7 @@ func (container *SOCKContainer) Meta() *SandboxMeta {
 	return container.meta
 }
 
-func (container *SOCKContainer) Client() (*http.Client) {
+func (container *SOCKContainer) Client() *http.Client {
 	return container.client
 }
 
diff --git a/src/worker/sandbox/sockPool.go b/src/worker/sandbox/sockPool.go
index 2911ab837..38c1f26b1 100644
--- a/src/worker/sandbox/sockPool.go
+++ b/src/worker/sandbox/sockPool.go
@@ -1,14 +1,15 @@
 package sandbox
 
 import (
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"log"
+	"net"
+	"net/http"
 	"path/filepath"
 	"strings"
 	"sync/atomic"
-	"net"
-	"net/http"
 	"time"
 
 	"github.com/open-lambda/open-lambda/ol/common"
@@ -64,6 +65,29 @@ func sbStr(sb Sandbox) string {
 	return fmt.Sprintf("<SB %v>", sb.ID())
 }
 
+// importLines renders the Python snippet that pre-imports the given modules
+// inside the sandbox, ignoring any module that fails to import.
+func importLines(modules []string) (string, error) {
+	if len(modules) == 0 {
+		return "", nil
+	}
+	modulesStr, err := json.Marshal(modules)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal module list: %v", err)
+	}
+	code := fmt.Sprintf(`
+import importlib, os
+os.environ['OPENBLAS_NUM_THREADS'] = '2'
+for mod in %s:
+    try:
+        importlib.import_module(mod)
+    except Exception:
+        pass
+print('Imported')
+`, modulesStr)
+	return code, nil
+}
+
 func (pool *SOCKPool) Create(parent Sandbox, isLeaf bool, codeDir, scratchDir string, meta *SandboxMeta, rtType common.RuntimeType) (sb Sandbox, err error) {
 	id := fmt.Sprintf("%d", atomic.AddInt64(&nextId, 1))
 	meta = fillMetaDefaults(meta)
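For a concrete picture of what importLines hands to the Python bootstrap, here is a hypothetical call (the module names are examples only):

code, err := importLines([]string{"numpy", "pandas"})
if err != nil {
	log.Fatal(err)
}
fmt.Print(code)
// prints (roughly):
//
//   import importlib, os
//   os.environ['OPENBLAS_NUM_THREADS'] = '2'
//   for mod in ["numpy","pandas"]:
//       try:
//           importlib.import_module(mod)
//       except Exception:
//           pass
//   print('Imported')

Import failures are deliberately swallowed here: a warm sandbox remains usable even if a pre-import fails, and the handler will surface the real error if it genuinely needs the module.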
@@ -86,6 +108,8 @@ func (pool *SOCKPool) Create(parent Sandbox, isLeaf bool, codeDir, scratchDir st
 		meta:           meta,
 		rtType:         rtType,
 		containerProxy: nil,
+		IsZygote:       !isLeaf,
+		Node:           -1, // -1 by default; if it has a parent, this is set to the parent's node later
 	}
 
 	var c Sandbox = cSock
@@ -103,7 +127,8 @@ func (pool *SOCKPool) Create(parent Sandbox, isLeaf bool, codeDir, scratchDir st
 	moveMemCharge := (parent == nil)
 	cSock.cg = pool.cgPool.GetCg(meta.MemLimitMB, moveMemCharge, meta.CPUPercent)
 	t2.T1()
-	cSock.printf("use cgroup %s", cSock.cg.Name)
+	cgName := cSock.cg.Name()
+	cSock.printf("use cgroup %s", cgName)
 
 	defer func() {
 		if err != nil {
@@ -125,21 +150,18 @@ func (pool *SOCKPool) Create(parent Sandbox, isLeaf bool, codeDir, scratchDir st
 	if rtType == common.RT_PYTHON {
 		// add installed packages to the path, and import the modules we'll need
 		var pyCode []string
-
+		// by this point, all packages are guaranteed to have been installed by pullHandlerIfStale()
 		for _, pkg := range meta.Installs {
 			path := "'/packages/" + pkg + "/files'"
-			pyCode = append(pyCode, "if os.path.exists("+path+"):")
-			pyCode = append(pyCode, "	if not "+path+" in sys.path:")
-			pyCode = append(pyCode, "		sys.path.insert(0, "+path+")")
+			pyCode = append(pyCode, "if not "+path+" in sys.path:")
+			pyCode = append(pyCode, "	sys.path.insert(0, "+path+")")
 		}
 
-		// we need handle any possible error while importing a module
-		for _, mod := range meta.Imports {
-			pyCode = append(pyCode, "try:")
-			pyCode = append(pyCode, "	import "+mod)
-			pyCode = append(pyCode, "except Exception as e:")
-			pyCode = append(pyCode, "	print('bootstrap.py error:', e)")
+		lines, err := importLines(meta.Imports)
+		if err != nil {
+			log.Printf("Error generating import lines: %v", err)
 		}
+		pyCode = append(pyCode, lines)
 
 		// handler or Zygote?
 		if isLeaf {
@@ -174,6 +196,7 @@ func (pool *SOCKPool) Create(parent Sandbox, isLeaf bool, codeDir, scratchDir st
 	} else {
 		t2 := t.T0("fresh-proc")
 		if err := cSock.freshProc(); err != nil {
+			log.Printf("freshProc error: %s", err.Error())
 			return nil, err
 		}
 		t2.T1()
@@ -192,7 +215,7 @@ func (pool *SOCKPool) Create(parent Sandbox, isLeaf bool, codeDir, scratchDir st
 
 	cSock.client = &http.Client{
 		Transport: &http.Transport{Dial: dial},
-		Timeout: time.Second * time.Duration(common.Conf.Limits.Max_runtime_default),
+		Timeout:   time.Second * time.Duration(common.Conf.Limits.Max_runtime_default),
 	}
 
 	// event handling
diff --git a/src/worker/server/lambdaServer.go b/src/worker/server/lambdaServer.go
index 1afa09121..0b162a985 100644
--- a/src/worker/server/lambdaServer.go
+++ b/src/worker/server/lambdaServer.go
@@ -59,6 +59,9 @@ func (s *LambdaServer) RunLambda(w http.ResponseWriter, r *http.Request) {
 	if len(urlParts) == 2 {
 		img := urlParts[1]
 		s.lambdaMgr.Get(img).Invoke(w, r)
+		// time.Sleep(3 * time.Millisecond)
+		// w.WriteHeader(http.StatusOK)
+		// w.Write([]byte("Placeholder, testing boss throughput limit"))
 	} else {
 		w.WriteHeader(http.StatusInternalServerError)
 		w.Write([]byte("expected invocation format: /run/<lambda-name>"))
@@ -83,6 +86,15 @@ func NewLambdaServer() (*LambdaServer, error) {
 		return nil, err
 	}
 
+	warmup := common.Conf.Features.Warmup
+	if warmup {
+		log.Printf("Warming up lambda server")
+		err = lambdaMgr.Warmup()
+		if err != nil {
+			log.Printf("Error warming up lambda server: %s", err.Error())
+		}
+	}
+
 	server := &LambdaServer{
 		lambdaMgr: lambdaMgr,
 	}
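The warmup hook gives NewLambdaServer a chance to pay cold-start costs before serving traffic. A minimal sketch of the same gate in isolation — the warmable interface is an illustrative stand-in; the patch only relies on lambdaMgr having a Warmup() error method, as invoked above:

// warmable is an illustrative stand-in for the lambda manager.
type warmable interface {
	Warmup() error
}

// warmupIfEnabled mirrors the startup gate above: warmup failures are
// logged but never abort server construction.
func warmupIfEnabled(enabled bool, mgr warmable) {
	if !enabled {
		return
	}
	log.Printf("Warming up lambda server")
	if err := mgr.Warmup(); err != nil {
		log.Printf("Error warming up lambda server: %s", err.Error())
	}
}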
diff --git a/src/worker/server/server.go b/src/worker/server/server.go
index 820558304..9740128e7 100644
--- a/src/worker/server/server.go
+++ b/src/worker/server/server.go
@@ -12,21 +12,21 @@ import (
 	"runtime"
 	"runtime/pprof"
 	"strconv"
+	"sync"
 	"syscall"
-	"sync"
 
 	"github.com/open-lambda/open-lambda/ol/common"
 )
 
 const (
-	RUN_PATH       = "/run/"
-	PID_PATH       = "/pid"
-	STATUS_PATH    = "/status"
-	STATS_PATH     = "/stats"
-	DEBUG_PATH     = "/debug"
-	PPROF_MEM_PATH = "/pprof/mem"
+	RUN_PATH             = "/run/"
+	PID_PATH             = "/pid"
+	STATUS_PATH          = "/status"
+	STATS_PATH           = "/stats"
+	DEBUG_PATH           = "/debug"
+	PPROF_MEM_PATH       = "/pprof/mem"
 	PPROF_CPU_START_PATH = "/pprof/cpu-start"
-	PPROF_CPU_STOP_PATH = "/pprof/cpu-stop"
+	PPROF_CPU_STOP_PATH  = "/pprof/cpu-stop"
 )
 
 type cleanable interface {
@@ -35,6 +35,7 @@ type cleanable interface {
 
 // temporary file storing cpu profiled data
 const CPU_TEMP_PATTERN = ".cpu.*.prof"
+
 var cpuTemp *os.File = nil
 var lock sync.Mutex
 
@@ -83,7 +84,7 @@ func doCpuStart() error {
 	if cpuTemp != nil {
 		return fmt.Errorf("Already started cpu profiling\n")
 	}
-	
+
 	// fresh cpu profiling
 	temp, err := os.CreateTemp("", CPU_TEMP_PATTERN)
 	if err != nil {
@@ -132,7 +133,7 @@ func PprofCpuStop(w http.ResponseWriter, r *http.Request) {
 	cpuTemp.Close()
 	cpuTemp = nil
 	defer os.Remove(tempFilename) // deferred cleanup
-	
+
 	// read data from file
 	log.Printf("Reading from %s\n", tempFilename)
 	buffer, err := ioutil.ReadFile(tempFilename)
@@ -156,24 +157,24 @@ func shutdown(pidPath string, server cleanable) {
 	snapshot := common.SnapshotStats()
 	rc := 0
 
-    // "cpu-start"ed but have not "cpu-stop"ped before kill
-    log.Printf("save buffered profiled data to cpu.buf.prof\n")
-    if cpuTemp != nil {
-        pprof.StopCPUProfile()
-        filename := cpuTemp.Name()
-        cpuTemp.Close()
-        
-        in, err := ioutil.ReadFile(filename)
-        if err != nil {
-            log.Printf("error: %s", err)
-            rc = 1
-        } else if err = ioutil.WriteFile("cpu.buf.prof", in, 0644); err != nil{
-            log.Printf("error: %s", err)
-            rc = 1
-        }
-
-        os.Remove(filename)
-    }
+	// "cpu-start"ed but not "cpu-stop"ped before kill
+	log.Printf("save buffered profiled data to cpu.buf.prof\n")
+	if cpuTemp != nil {
+		pprof.StopCPUProfile()
+		filename := cpuTemp.Name()
+		cpuTemp.Close()
+
+		in, err := ioutil.ReadFile(filename)
+		if err != nil {
+			log.Printf("error: %s", err)
+			rc = 1
+		} else if err = ioutil.WriteFile("cpu.buf.prof", in, 0644); err != nil {
+			log.Printf("error: %s", err)
+			rc = 1
+		}
+
+		os.Remove(filename)
+	}
 
 	log.Printf("save stats to %s", statsPath)
 	if s, err := json.MarshalIndent(snapshot, "", "\t"); err != nil {
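The profiling endpoints and the shutdown path share one pattern: stream the CPU profile into a temp file, then read the bytes back out to wherever they belong (the HTTP response in PprofCpuStop, or cpu.buf.prof when the worker is killed mid-profile). A self-contained sketch of that pattern, independent of the worker:

package main

import (
	"io/ioutil"
	"log"
	"os"
	"runtime/pprof"
	"time"
)

func main() {
	// same temp-file pattern as CPU_TEMP_PATTERN above
	temp, err := os.CreateTemp("", ".cpu.*.prof")
	if err != nil {
		log.Fatal(err)
	}
	if err := pprof.StartCPUProfile(temp); err != nil {
		log.Fatal(err)
	}

	time.Sleep(100 * time.Millisecond) // profiled work goes here

	pprof.StopCPUProfile() // flushes pending samples to temp
	name := temp.Name()
	temp.Close()
	defer os.Remove(name)

	// the worker either writes these bytes to the HTTP response
	// (PprofCpuStop) or to cpu.buf.prof (shutdown)
	buf, err := ioutil.ReadFile(name)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("cpu.buf.prof", buf, 0644); err != nil {
		log.Fatal(err)
	}
}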