Commit 07cc8483 by 20210828028


parent e2387408
Command exited with non-zero status 127
0:00.00 0.00 0.00 1220 0 127 "test.sh"
Command exited with non-zero status 127
0:00.00 0.00 0.00 1148 0 127 "test.sh"
Command exited with non-zero status 1
14:01:05 35544.04 11439.93 2680460 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 2 3 4 --data_dir ../data/SST-2.full --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir test_out --do_train --do_eval --ipet_generations 2 --sc_per_gpu_train_batch_size 1024 --sc_per_gpu_unlabeled_batch_size 1024 --sc_per_gpu_eval_batch_size 1024 --eval_set test"
Command exited with non-zero status 1
14:01:05 35544.05 11439.94 2680460 0 1 "bash test.sh"
Command exited with non-zero status 1
0:00.05 0.03 0.01 9872 0 1 "python3 cli.py --method pet --pattern_ids 0 1 5 4 --data_dir ../data/SST-2 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir test1_out --do_train --do_eval --ipet_generations 2 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
0:00.06 0.03 0.02 9872 0 1 "bash test1.sh"
Command exited with non-zero status 1
0:01.17 1.73 4.15 219240 0 1 "python3 cli.py --method pet --pattern_ids 0 1 5 4 --data_dir ../data/SST-2 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir test1_out --do_train --do_eval --ipet_generations 2 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
0:01.19 1.73 4.16 219240 0 1 "bash test1.sh"
Command exited with non-zero status 1
12:32.13 474.15 175.70 3540124 0 1 "python3 cli.py --method pet --pattern_ids 0 1 5 4 --data_dir ../data/SST-2 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir test1_out --do_train --do_eval --ipet_generations 2 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
12:32.14 474.16 175.71 3540124 0 1 "bash test1.sh"
Command exited with non-zero status 1
11:57.81 479.87 178.59 3539344 0 1 "python3 cli.py --method pet --pattern_ids 0 1 5 6 --data_dir ../data/SST-2 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir test1_out2 --do_train --do_eval --ipet_generations 2 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
11:57.82 479.88 178.60 3539344 0 1 "bash test1.sh"
Command exited with non-zero status 1
3:18.78 100.58 29.71 4006348 0 1 "python3 cli.py --method pet --pattern_ids 0 1 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g5 --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
3:18.79 100.59 29.71 4006348 0 1 "bash test2.sh"
Command exited with non-zero status 1
10:13:02 28028.23 8381.65 4013948 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g5_ipet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
10:13:02 28028.24 8381.65 4013948 0 1 "bash test2.sh"
Command exited with non-zero status 1
0:01.44 2.38 6.40 256948 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 2 3 4 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g2_ipet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
0:08.20 5.44 4.31 1085160 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 2 3 4 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g2_ipet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
0:07.90 5.62 4.61 1085104 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 2 3 4 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g2_ipet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
0:07.92 5.52 4.43 1085360 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 2 3 4 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g2_ipet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
0:08.15 5.46 4.68 1084860 0 1 "python3 cli.py --method ipet --pattern_ids 0 1 2 3 4 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g2_ipet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
1:03.69 36.70 18.71 3531432 0 1 "python3 cli.py --method pet --pattern_ids 0 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name my-task --output_dir my_task_size32_pet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
Command exited with non-zero status 1
7:08.34 214.90 96.05 3995576 0 1 "python3 cli.py --method pet --pattern_ids 0 1 2 3 4 5 6 --data_dir ../data/SST-2.32 --model_type bert --model_name_or_path bert-base-uncased --task_name autobest5 --output_dir size32_g2_pet --do_train --do_eval --ipet_generations 5 --sc_per_gpu_train_batch_size 32 --sc_per_gpu_unlabeled_batch_size 32 --sc_per_gpu_eval_batch_size 32 --eval_set test"
date
python3 cli.py \
--method ipet \
--pattern_ids 0 1 2 3 4 \
--data_dir ../glue_data/SST-2 \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name autobest5 \
--output_dir autobest5_ipet6 \
--do_train \
--do_eval \
--eval_set dev && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
conda create -y -n pet python=3.7
conda activate pet
pip install -r requirements.txt
date
rm -rf test_out
../track python3 cli.py \
--method ipet \
--pattern_ids 0 1 2 3 4 \
--data_dir ../data/SST-2.full \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name autobest5 \
--output_dir test_out \
--do_train \
--do_eval \
--ipet_generations 2 \
--sc_per_gpu_train_batch_size 1024 \
--sc_per_gpu_unlabeled_batch_size 1024 --sc_per_gpu_eval_batch_size 1024 \
--eval_set test && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
#--data_dir ../glue_data/SST-2 \
date
out_dir=test1_out2
batch_size=32
rm -rf "$out_dir"
../track python3 cli.py \
--method pet \
--pattern_ids 0 1 5 6 \
--data_dir ../data/SST-2 \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name autobest5 \
--output_dir $out_dir \
--do_train \
--do_eval \
--ipet_generations 2 \
--sc_per_gpu_train_batch_size $batch_size \
--sc_per_gpu_unlabeled_batch_size $batch_size --sc_per_gpu_eval_batch_size $batch_size \
--eval_set test && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
#--data_dir ../glue_data/SST-2 \
date
out_dir=size32_g5_ipet
batch_size=32
rm -rf "$out_dir"
../track python3 cli.py \
--method ipet \
--pattern_ids 0 1 5 6 \
--data_dir ../data/SST-2.32 \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name autobest5 \
--output_dir $out_dir \
--do_train \
--do_eval \
--ipet_generations 5 \
--sc_per_gpu_train_batch_size $batch_size \
--sc_per_gpu_unlabeled_batch_size $batch_size --sc_per_gpu_eval_batch_size $batch_size \
--eval_set test && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
#--data_dir ../glue_data/SST-2 \
date
out_dir=size32_g2_ipet
batch_size=32
rm -rf "$out_dir"
../track python3 cli.py \
--method ipet \
--pattern_ids 0 1 2 3 4 5 6 \
--data_dir ../data/SST-2.32 \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name autobest5 \
--output_dir $out_dir \
--do_train \
--do_eval \
--ipet_generations 5 \
--sc_per_gpu_train_batch_size $batch_size \
--sc_per_gpu_unlabeled_batch_size $batch_size --sc_per_gpu_eval_batch_size $batch_size \
--eval_set test && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
#--data_dir ../glue_data/SST-2 \
date
out_dir=my_task_size32_pet
batch_size=32
rm -rf "$out_dir"
../track python3 cli.py \
--method pet \
--pattern_ids 0 \
--data_dir ../data/SST-2.32 \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name my-task \
--output_dir $out_dir \
--do_train \
--do_eval \
--ipet_generations 5 \
--sc_per_gpu_train_batch_size $batch_size \
--sc_per_gpu_unlabeled_batch_size $batch_size --sc_per_gpu_eval_batch_size $batch_size \
--eval_set test && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
#--data_dir ../glue_data/SST-2 \
date
out_dir=size32_g2_pet
batch_size=32
rm -rf "$out_dir"
../track python3 cli.py \
--method pet \
--pattern_ids 0 1 2 3 4 5 6 \
--data_dir ../data/SST-2.32 \
--model_type bert \
--model_name_or_path bert-base-uncased \
--task_name autobest5 \
--output_dir $out_dir \
--do_train \
--do_eval \
--ipet_generations 5 \
--sc_per_gpu_train_batch_size $batch_size \
--sc_per_gpu_unlabeled_batch_size $batch_size --sc_per_gpu_eval_batch_size $batch_size \
--eval_set test && \
date
#--data_dir /data/projects/LM-BFF/data/k-shot/SST-2/16-100/ \
#--data_dir ../glue_data/SST-2 \
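# The six ../track runs above differ only in method, pattern ids, data dir,
# generation count, and output dir. A parameterized sketch (assuming the same
# cli.py flags and ../track wrapper used by the scripts above) that removes
# the copy-paste:
batch_size=32
run() {
  local method=$1 out_dir=$2 data_dir=$3 gens=$4; shift 4
  rm -rf "$out_dir"
  date
  ../track python3 cli.py \
    --method "$method" \
    --pattern_ids "$@" \
    --data_dir "$data_dir" \
    --model_type bert \
    --model_name_or_path bert-base-uncased \
    --task_name autobest5 \
    --output_dir "$out_dir" \
    --do_train \
    --do_eval \
    --ipet_generations "$gens" \
    --sc_per_gpu_train_batch_size $batch_size \
    --sc_per_gpu_unlabeled_batch_size $batch_size \
    --sc_per_gpu_eval_batch_size $batch_size \
    --eval_set test && date
}
run ipet size32_g5_ipet ../data/SST-2.32 5 0 1 5 6
run pet  size32_g2_pet  ../data/SST-2.32 5 0 1 2 3 4 5 6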
{
"cells": [
{
"cell_type": "code",
"execution_count": 12,
"id": "76bde746-5ed4-479d-91f7-0d356cc15ac4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/mist/projects/group1_pretraining/pet-master\n"
]
}
],
"source": [
"%cd pet-master"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c9cd52d6-43da-4acc-81fe-a384ea5d50ae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/usr/local/bin/python\n"
]
}
],
"source": [
"!which python"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "51b0fe0b-956d-4f04-a90b-e69b24fd3dc4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zsh:source:1: no such file or directory: /mistgpu/miniconda/envs/pet/bin/activate\n"
]
}
],
"source": [
"!source /mistgpu/miniconda/envs/pet/bin/activate"
]
},
{
"cell_type": "code",
"execution_count": 120,
"id": "062104ba-807a-42cb-bf74-f09b83262d07",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"size32_g2_pet/p1-i2/pytorch_model.bin\n"
]
}
],
"source": [
"ls size32_g2_pet/p1-i2/pytorch_model.bin "
]
},
{
"cell_type": "code",
"execution_count": 122,
"id": "d1c8f17b-cc0d-49c5-a2a0-6114cea51871",
"metadata": {},
"outputs": [],
"source": [
"import pickle"
]
},
{
"cell_type": "code",
"execution_count": 124,
"id": "d1abb14f-4bd4-4ab0-b4ad-97f91d0ed9e5",
"metadata": {},
"outputs": [],
"source": [
"m = pickle.load(open('size32_g2_pet/p1-i2/pytorch_model.bin','rb'))"
]
},
{
"cell_type": "code",
"execution_count": 126,
"id": "ca3d3c13-2e51-4c6d-b5af-f4a59d089323",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['__abs__',\n",
" '__add__',\n",
" '__and__',\n",
" '__bool__',\n",
" '__ceil__',\n",
" '__class__',\n",
" '__delattr__',\n",
" '__dir__',\n",
" '__divmod__',\n",
" '__doc__',\n",
" '__eq__',\n",
" '__float__',\n",
" '__floor__',\n",
" '__floordiv__',\n",
" '__format__',\n",
" '__ge__',\n",
" '__getattribute__',\n",
" '__getnewargs__',\n",
" '__gt__',\n",
" '__hash__',\n",
" '__index__',\n",
" '__init__',\n",
" '__init_subclass__',\n",
" '__int__',\n",
" '__invert__',\n",
" '__le__',\n",
" '__lshift__',\n",
" '__lt__',\n",
" '__mod__',\n",
" '__mul__',\n",
" '__ne__',\n",
" '__neg__',\n",
" '__new__',\n",
" '__or__',\n",
" '__pos__',\n",
" '__pow__',\n",
" '__radd__',\n",
" '__rand__',\n",
" '__rdivmod__',\n",
" '__reduce__',\n",
" '__reduce_ex__',\n",
" '__repr__',\n",
" '__rfloordiv__',\n",
" '__rlshift__',\n",
" '__rmod__',\n",
" '__rmul__',\n",
" '__ror__',\n",
" '__round__',\n",
" '__rpow__',\n",
" '__rrshift__',\n",
" '__rshift__',\n",
" '__rsub__',\n",
" '__rtruediv__',\n",
" '__rxor__',\n",
" '__setattr__',\n",
" '__sizeof__',\n",
" '__str__',\n",
" '__sub__',\n",
" '__subclasshook__',\n",
" '__truediv__',\n",
" '__trunc__',\n",
" '__xor__',\n",
" 'bit_length',\n",
" 'conjugate',\n",
" 'denominator',\n",
" 'from_bytes',\n",
" 'imag',\n",
" 'numerator',\n",
" 'real',\n",
" 'to_bytes']"
]
},
"execution_count": 126,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dir(m)"
]
},
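{
"cell_type": "markdown",
"id": "b4c7e2f1-8d3a-4e5b-9f60-112233445501",
"metadata": {},
"source": [
"`dir(m)` above lists only `int` methods: `pickle.load` stops after the first pickled object in the file, which for a torch checkpoint saved in the legacy format is the serialization magic number (an integer). A sketch of the intended inspection using `torch.load` on the same path:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b4c7e2f1-8d3a-4e5b-9f60-112233445502",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"# torch.load deserializes the whole checkpoint; pickle.load above only read\n",
"# torch's magic number, which is why dir(m) showed a plain int.\n",
"state_dict = torch.load('size32_g2_pet/p1-i2/pytorch_model.bin', map_location='cpu')\n",
"list(state_dict.keys())[:5]"
]
},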
{
"cell_type": "code",
"execution_count": 5,
"id": "bdebacb6-bb36-4119-97b6-c831d6ffa1cc",
"metadata": {
"collapsed": true,
"jupyter": {
"outputs_hidden": true
},
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Defaulting to user installation because normal site-packages is not writeable\n",
"Looking in indexes: https://mirrors.aliyun.com/pypi/simple/\n",
"Looking in links: https://download.pytorch.org/whl/torch_stable.html\n",
"Collecting numpy==1.19\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/93/0b/71ae818646c1a80fbe6776d41f480649523ed31243f1f34d9d7e41d70195/numpy-1.19.0-cp36-cp36m-manylinux2010_x86_64.whl (14.6 MB)\n",
"Collecting jsonpickle==1.1\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/dc/12/8c44eabb501e2bc0aec0dd152b328074d98a50968d3a02be28f6037f0c6a/jsonpickle-1.1-py2.py3-none-any.whl (31 kB)\n",
"Collecting scikit-learn==0.23.1\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/d9/3a/eb8d7bbe28f4787d140bb9df685b7d5bf6115c0e2a969def4027144e98b6/scikit_learn-0.23.1-cp36-cp36m-manylinux1_x86_64.whl (6.8 MB)\n",
"Collecting torch===1.5.0\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/13/70/54e9fb010fe1547bc4774716f11ececb81ae5b306c05f090f4461ee13205/torch-1.5.0-cp36-cp36m-manylinux1_x86_64.whl (752.0 MB)\n",
"Collecting torchvision==0.6.0\n",
" Downloading https://download.pytorch.org/whl/cu92/torchvision-0.6.0%2Bcu92-cp36-cp36m-linux_x86_64.whl (6.5 MB)\n",
" |████████████████████████████████| 6.5 MB 115 kB/s \n",
"\u001b[?25hCollecting transformers==3.0.2\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/27/3c/91ed8f5c4e7ef3227b4119200fc0ed4b4fd965b1f0172021c25701087825/transformers-3.0.2-py3-none-any.whl (769 kB)\n",
"Collecting tqdm==4.48.1\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/43/74/ff00ba1f998684ffc67ea700ed45e5099877906ac36ca6d7b5d9695e6166/tqdm-4.48.1-py2.py3-none-any.whl (68 kB)\n",
"Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.23.1->-r pet-master/requirements.txt (line 4)) (2.2.0)\n",
"Requirement already satisfied: scipy>=0.19.1 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.23.1->-r pet-master/requirements.txt (line 4)) (1.5.4)\n",
"Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn==0.23.1->-r pet-master/requirements.txt (line 4)) (1.0.1)\n",
"Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch===1.5.0->-r pet-master/requirements.txt (line 5)) (0.18.2)\n",
"Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.6/dist-packages (from torchvision==0.6.0->-r pet-master/requirements.txt (line 6)) (8.3.1)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (21.0)\n",
"Collecting tokenizers==0.8.1.rc1\n",
" Downloading https://mirrors.aliyun.com/pypi/packages/40/d0/30d5f8d221a0ed981a186c8eb986ce1c94e3a6e87f994eae9f4aa5250217/tokenizers-0.8.1rc1-cp36-cp36m-manylinux1_x86_64.whl (3.0 MB)\n",
" |████████████████████████████████| 3.0 MB 1.4 MB/s \n",
"\u001b[?25hRequirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (0.8)\n",
"Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (2.26.0)\n",
"Collecting sentencepiece!=0.1.92\n",
" Downloading https://mirrors.aliyun.com/pypi/packages/5b/49/2155d4078e9918003e77b6032a83d71995656bd05707d96e06a44cd6edf6/sentencepiece-0.1.96-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.2 MB)\n",
" |████████████████████████████████| 1.2 MB 29.7 MB/s \n",
"\u001b[?25hCollecting filelock\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/31/24/ee722b92f23b9ebd87783e893a75352c048bbbc1f67dce0d63b58b46cb48/filelock-3.3.2-py3-none-any.whl (9.7 kB)\n",
"Collecting sacremoses\n",
" Using cached https://mirrors.aliyun.com/pypi/packages/36/bf/15f8df78bce5eee8223553123173f010d426565980e457c559a71ecbecc3/sacremoses-0.0.46-py3-none-any.whl (895 kB)\n",
"Collecting regex!=2019.12.17\n",
" Downloading https://mirrors.aliyun.com/pypi/packages/af/75/3c6935c87ee0e4e682a6138d7590e5be9ca2f400212c30cf0f701f6011b5/regex-2021.10.23-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (748 kB)\n",
" |████████████████████████████████| 748 kB 37.3 MB/s \n",
"\u001b[?25hRequirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from packaging->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (2.4.7)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (2.9)\n",
"Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (2.0.4)\n",
"Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (1.26.6)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (2021.5.30)\n",
"Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from sacremoses->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (1.15.0)\n",
"Requirement already satisfied: click in /usr/lib/python3/dist-packages (from sacremoses->transformers==3.0.2->-r pet-master/requirements.txt (line 7)) (6.7)\n",
"Installing collected packages: tqdm, regex, numpy, torch, tokenizers, sentencepiece, sacremoses, filelock, transformers, torchvision, scikit-learn, jsonpickle\n",
"\u001b[33m WARNING: Value for scheme.platlib does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/site-packages\n",
" sysconfig: /home/mist/.local/lib/python3.6/site-packages\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.purelib does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/site-packages\n",
" sysconfig: /home/mist/.local/lib/python3.6/site-packages\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/tqdm\n",
" sysconfig: /home/mist/.local/include/python3.6/tqdm\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.scripts does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/bin\n",
" sysconfig: /home/mist/.local/bin\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.data does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip\n",
" sysconfig: /home/mist/.local\u001b[0m\n",
"\u001b[33m WARNING: Additional context:\n",
" user = True\n",
" home = None\n",
" root = None\n",
" prefix = None\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/regex\n",
" sysconfig: /home/mist/.local/include/python3.6/regex\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/numpy\n",
" sysconfig: /home/mist/.local/include/python3.6/numpy\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/torch\n",
" sysconfig: /home/mist/.local/include/python3.6/torch\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/tokenizers\n",
" sysconfig: /home/mist/.local/include/python3.6/tokenizers\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/sentencepiece\n",
" sysconfig: /home/mist/.local/include/python3.6/sentencepiece\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/sacremoses\n",
" sysconfig: /home/mist/.local/include/python3.6/sacremoses\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/filelock\n",
" sysconfig: /home/mist/.local/include/python3.6/filelock\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/transformers\n",
" sysconfig: /home/mist/.local/include/python3.6/transformers\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/torchvision\n",
" sysconfig: /home/mist/.local/include/python3.6/torchvision\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/scikit-learn\n",
" sysconfig: /home/mist/.local/include/python3.6/scikit-learn\u001b[0m\n",
"\u001b[33m WARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
" distutils: /mistgpu/pip/include/python3.6m/jsonpickle\n",
" sysconfig: /home/mist/.local/include/python3.6/jsonpickle\u001b[0m\n",
"\u001b[33mWARNING: Value for scheme.headers does not match. Please report this to <https://github.com/pypa/pip/issues/10151>\n",
"distutils: /mistgpu/pip/include/python3.6m/UNKNOWN\n",
"sysconfig: /home/mist/.local/include/python3.6/UNKNOWN\u001b[0m\n",
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"tensorflow 2.6.0 requires numpy~=1.19.2, but you have numpy 1.19.0 which is incompatible.\u001b[0m\n",
"Successfully installed filelock-3.3.2 jsonpickle-1.1 numpy-1.19.0 regex-2021.10.23 sacremoses-0.0.46 scikit-learn-0.23.1 sentencepiece-0.1.96 tokenizers-0.8.1rc1 torch-1.5.0 torchvision-0.6.0+cu92 tqdm-4.48.1 transformers-3.0.2\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"pip install -r requirements.txt"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6a518d5a-62a5-46d3-a6dd-b9b20575d5f6",
"metadata": {},
"outputs": [],
"source": [
"from transformers import InputExample, AdamW, get_linear_schedule_with_warmup, PreTrainedTokenizer, BertForMaskedLM, \\\n",
" RobertaForMaskedLM, XLMRobertaForMaskedLM, XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer, \\\n",
" XLNetLMHeadModel, BertConfig, BertForSequenceClassification, BertTokenizer, RobertaConfig, \\\n",
" RobertaForSequenceClassification, RobertaTokenizer, XLMRobertaConfig, XLMRobertaForSequenceClassification, \\\n",
" XLMRobertaTokenizer, AlbertForSequenceClassification, AlbertForMaskedLM, AlbertTokenizer, AlbertConfig, \\\n",
" GPT2Config, GPT2LMHeadModel, GPT2Tokenizer"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0f685928-5ab3-429e-aef1-6a0133db714a",
"metadata": {},
"outputs": [],
"source": [
"model_class = BertForMaskedLM#MODEL_CLASSES[wrapper.config.model_type][wrapper.config.wrapper_type]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1a483a23-b646-4998-ad16-10f92a942ba1",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
"- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n",
"- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
"Some weights of BertForMaskedLM were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['cls.predictions.decoder.bias']\n",
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
]
}
],
"source": [
"model = model_class.from_pretrained('bert-base-uncased')"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "85453e78-a45f-4536-92e5-0f1b907eb353",
"metadata": {},
"outputs": [],
"source": [
"tokenizer_class = BertTokenizer"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "dbbdf4f0-452b-4c62-b2c8-03410c1e5df3",
"metadata": {},
"outputs": [],
"source": [
"tokenizer = tokenizer_class.from_pretrained('bert-base-uncased')"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "e1448357-8003-4a98-8b81-7b1d10090fd3",
"metadata": {},
"outputs": [],
"source": [
"from pet.pvp import *\n",
"from pet.tasks import *"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "85fd8502-640d-4a22-bdc3-84f3dcada45f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
"- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n",
"- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
"Some weights of BertForMaskedLM were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['cls.predictions.decoder.bias']\n",
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
]
}
],
"source": [
"from transformers import BertTokenizer, BertForMaskedLM\n",
"import torch\n",
"\n",
"tokenizer = BertTokenizer.from_pretrained('bert-base-uncased'\n",
" # , cache_dir='E:/Projects/albert/'\n",
" )\n",
"model = BertForMaskedLM.from_pretrained('bert-base-uncased')\n",
"\n",
"sentence = \"It is a very beautiful book.\"\n"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "aa63577b-7f39-40f3-a65b-c3a3ec21df7e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['it', 'is', 'a', 'very', 'beautiful', 'book', '.']"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tokenizer.tokenize(sentence)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "eadc3826-6e0d-48d5-97b3-e66fb7b3decb",
"metadata": {},
"outputs": [],
"source": [
"tokens = ['[CLS]'] + tokenizer.tokenize(sentence) + ['[SEP]']\n"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "08b686b4-d4d8-4530-b622-a1969d85f713",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['[CLS]', '[MASK]', 'is', 'a', 'very', 'beautiful', 'book', '.', '[SEP]']\n",
"it\n",
"['[CLS]', 'it', '[MASK]', 'a', 'very', 'beautiful', 'book', '.', '[SEP]']\n",
"is\n",
"['[CLS]', 'it', 'is', '[MASK]', 'very', 'beautiful', 'book', '.', '[SEP]']\n",
"a\n",
"['[CLS]', 'it', 'is', 'a', '[MASK]', 'beautiful', 'book', '.', '[SEP]']\n",
"very\n",
"['[CLS]', 'it', 'is', 'a', 'very', '[MASK]', 'book', '.', '[SEP]']\n",
"good\n",
"['[CLS]', 'it', 'is', 'a', 'very', 'beautiful', '[MASK]', '.', '[SEP]']\n",
"place\n",
"['[CLS]', 'it', 'is', 'a', 'very', 'beautiful', 'book', '[MASK]', '[SEP]']\n",
".\n"
]
}
],
"source": [
"\n",
"# i就是被mask掉的id\n",
"for i in range(1, len(tokens)-1):\n",
" tmp = tokens[:i] + ['[MASK]'] + tokens[i+1:]\n",
" masked_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tmp)])\n",
" segment_ids = torch.tensor([[0]*len(tmp)])\n",
"\n",
" outputs = model(masked_ids, token_type_ids=segment_ids)\n",
" prediction_scores = outputs[0]\n",
" print(tmp)\n",
" # 打印被预测的字符\n",
" prediction_index = torch.argmax(prediction_scores[0, i]).item()\n",
" predicted_token = tokenizer.convert_ids_to_tokens([prediction_index])[0]\n",
" print(predicted_token)\n"
]
},
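{
"cell_type": "markdown",
"id": "c5d8f3a2-9e4b-4f6c-a071-223344556601",
"metadata": {},
"source": [
"The loop above keeps only the argmax at each masked position. A small sketch (reusing `model`, `tokenizer`, and the loop's final `tmp`/`masked_ids`) of the top-5 candidates for the last masked position:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5d8f3a2-9e4b-4f6c-a071-223344556602",
"metadata": {},
"outputs": [],
"source": [
"# Top-5 candidates for the masked position instead of just the argmax.\n",
"pos = tmp.index('[MASK]')\n",
"outputs = model(masked_ids, token_type_ids=segment_ids)\n",
"prediction_scores = outputs[0]\n",
"top5 = torch.topk(prediction_scores[0, pos], k=5).indices.tolist()\n",
"tokenizer.convert_ids_to_tokens(top5)"
]
},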
{
"cell_type": "code",
"execution_count": 23,
"id": "2186e0bd-c7ed-4cf5-9744-0122083949a6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1012"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prediction_index"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "ddfbc826-c4eb-49ff-bb8f-cb3a77e4635f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['.']"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tokenizer.convert_ids_to_tokens([prediction_index])"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "8fda8eec-ca52-4d41-ba68-280ebc6a92e8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[ 101, 2009, 2003, 1037, 2200, 3376, 2338, 103, 102]])"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"masked_ids\n"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "9d3c8579-32be-4c94-bc47-e720b8fda56f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['[CLS]', 'it', 'is', 'a', 'very', 'beautiful', 'book', '[MASK]', '[SEP]']"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tmp"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "916af908-e1a2-4718-bb4d-eaf320997a2e",
"metadata": {},
"outputs": [],
"source": [
"text = 'a stirring , funny and finally transporting re-imagining of beauty and the beast and 1930s horror films'"
]
},
{
"cell_type": "code",
"execution_count": 52,
"id": "0a4b8a14-ebaa-432a-b3e2-8de5f5f032db",
"metadata": {},
"outputs": [],
"source": [
"tmp = \\\n",
" ['[CLS]']+ \\\n",
" tokenizer.tokenize(text) + \\\n",
" tokenizer.tokenize('Overall, It is') + \\\n",
" ['[MASK]'] + \\\n",
" tokenizer.tokenize('.') + \\\n",
" ['[SEP]']"
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "58b36132-1500-408b-a599-22153eeaeb51",
"metadata": {},
"outputs": [],
"source": [
"masked_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tmp)])\n",
"segment_ids = torch.tensor([[0]*len(tmp)])"
]
},
{
"cell_type": "code",
"execution_count": 73,
"id": "04a56cd7-281f-48e1-9e1e-01dc35974ca0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"fantastic\n"
]
}
],
"source": [
"outputs = model(masked_ids, token_type_ids=segment_ids)\n",
"prediction_scores = outputs[0]\n",
"prediction_index = torch.argmax(prediction_scores[0, tmp.index('[MASK]')]).item()\n",
"predicted_token = tokenizer.convert_ids_to_tokens([prediction_index])[0]\n",
"print(predicted_token)\n"
]
},
{
"cell_type": "code",
"execution_count": 66,
"id": "b6248a5a-5a54-4cfb-91be-857dc7551ad1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3048"
]
},
"execution_count": 66,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prediction_index"
]
},
{
"cell_type": "code",
"execution_count": 72,
"id": "f33c42e7-14e7-4525-9b6f-aaf4b123db03",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"24"
]
},
"execution_count": 72,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"m2c = self.mlm_logits_to_cls_logits_tensor.to(logits.device)\n",
"# filler_len.shape() == max_fillers\n",
"filler_len = torch.tensor([len(self.verbalize(label)) for label in self.wrapper.config.label_list],\n",
" dtype=torch.float)\n",
"filler_len = filler_len.to(logits.device)\n",
"\n",
"# cls_logits.shape() == num_labels x max_fillers (and 0 when there are not as many fillers).\n",
"cls_logits = logits[torch.max(torch.zeros_like(m2c), m2c)]\n",
"cls_logits = cls_logits * (m2c > 0).float()\n",
"\n",
"# cls_logits.shape() == num_labels\n",
"cls_logits = cls_logits.sum(axis=1) / filler_len\n",
"return cls_logits\n"
]
},
{
"cell_type": "code",
"execution_count": 99,
"id": "a8688182-b0c1-4d2c-aa00-b3f101c28f50",
"metadata": {},
"outputs": [],
"source": [
"VERBALIZER = {1: [\"Good\"], # \"breathtaking\",\n",
" 0: [\"Bad\"],} \n",
"label_list = [0,1]\n",
"def mlm_logits_to_cls_logits_tensor():\n",
" \n",
" max_num_verbalizers = 1\n",
" m2c_tensor = torch.ones([len(label_list), max_num_verbalizers], dtype=torch.long) * -1\n",
"\n",
" for label_idx, label in enumerate(label_list):\n",
" verbalizers = VERBALIZER[label]\n",
" for verbalizer_idx, verbalizer in enumerate(verbalizers):\n",
" verbalizer_id = get_verbalization_ids(verbalizer, tokenizer, force_single_token=True)\n",
" assert verbalizer_id != tokenizer.unk_token_id, \"verbalization was tokenized as <UNK>\"\n",
" m2c_tensor[label_idx, verbalizer_idx] = verbalizer_id\n",
" return m2c_tensor\n"
]
},
{
"cell_type": "code",
"execution_count": 95,
"id": "b98e71c0-9d78-436c-9c41-e7ed40baffa3",
"metadata": {},
"outputs": [],
"source": [
"m2c = mlm_logits_to_cls_logits_tensor()"
]
},
{
"cell_type": "code",
"execution_count": 112,
"id": "cb62a029-1308-461f-8b64-62a6875bb962",
"metadata": {},
"outputs": [],
"source": [
"device='cpu'"
]
},
{
"cell_type": "code",
"execution_count": 97,
"id": "bcfb7c40-7618-4446-97e5-d27d34ae7521",
"metadata": {},
"outputs": [],
"source": [
"logits = prediction_scores"
]
},
{
"cell_type": "code",
"execution_count": 113,
"id": "4c967998-0a8a-498e-83be-3831b2c5b938",
"metadata": {},
"outputs": [],
"source": [
"filler_len = torch.tensor(\n",
" [len(VERBALIZER[label]) for label in label_list],\n",
" dtype=torch.float\n",
")\n",
"filler_len = filler_len.to(device)\n",
"\n",
"# cls_logits.shape() == num_labels x max_fillers (and 0 when there are not as many fillers).\n",
"cls_logits = logits[0,24,torch.max(torch.zeros_like(m2c), m2c)]\n",
"cls_logits = cls_logits * (m2c > 0).float()\n",
"\n",
"# cls_logits.shape() == num_labels\n",
"cls_logits = cls_logits.sum(axis=1) / filler_len\n",
"#return cls_logits"
]
},
{
"cell_type": "code",
"execution_count": 114,
"id": "1c9cac8c-7dcb-47f5-bccb-83e98b422b3c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([2.9357, 6.5621], grad_fn=<DivBackward0>)"
]
},
"execution_count": 114,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cls_logits"
]
},
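{
"cell_type": "markdown",
"id": "d6e9a4b3-0f5c-4a7d-b182-334455667701",
"metadata": {},
"source": [
"The two verbalizer logits above act as class scores, so softmax plus argmax over `label_list` gives the PVP-style prediction for the review. A closing sketch, assuming `cls_logits` and `label_list` from the cells above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6e9a4b3-0f5c-4a7d-b182-334455667702",
"metadata": {},
"outputs": [],
"source": [
"# Softmax over the per-label verbalizer logits, then pick the argmax label.\n",
"probs = torch.softmax(cls_logits, dim=0)\n",
"label_list[torch.argmax(probs).item()], probs.tolist()"
]
},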
{
"cell_type": "code",
"execution_count": 110,
"id": "c1d90c99-838f-4b51-82ac-2c111b5d1ad1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[2.9357],\n",
" [6.5621]], grad_fn=<IndexBackward>)"
]
},
"execution_count": 110,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"logits[0,24,m2c]"
]
},
{
"cell_type": "code",
"execution_count": 103,
"id": "4cbb24ec-c28a-4505-9be5-7ee31c746a82",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[2919],\n",
" [2204]])"
]
},
"execution_count": 103,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"m2c"
]
},
{
"cell_type": "code",
"execution_count": 84,
"id": "ce704a4f-1dab-4dc1-b439-8a52f409221d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"10392"
]
},
"execution_count": 84,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.argmax(prediction_scores[0,24]).item()"
]
},
{
"cell_type": "code",
"execution_count": 69,
"id": "a3d0e165-72dc-49f4-b935-da7ddde6d137",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([1, 27, 30522])"
]
},
"execution_count": 69,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prediction_scores.shape"
]
},
{
"cell_type": "code",
"execution_count": 62,
"id": "6f0e138b-275e-4b82-a8a7-aec555575d61",
"metadata": {},
"outputs": [],
"source": [
"m2c = (torch.ones(3,3)).float()"
]
},
{
"cell_type": "code",
"execution_count": 65,
"id": "d0dc2cf7-7a05-43b8-acf4-788ba132391f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1., 1., 1.],\n",
" [1., 1., 1.],\n",
" [1., 1., 1.]])"
]
},
"execution_count": 65,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.max(torch.zeros_like(m2c),m2c)"
]
},
{
"cell_type": "code",
"execution_count": 64,
"id": "7232b968-5f92-41ea-ae15-b8e994ffb196",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([2, 3])"
]
},
"execution_count": 64,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.tensor([2,3])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0ad694ee-1728-45ab-b575-08499c78ea22",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}