Commit d62f306f Author: 朱学凯

add code

Parent 106fd505
@@ -4,7 +4,7 @@ import torch
from torch.utils import data
import json
import collections
from sklearn.preprocessing import OneHotEncoder
from torch.utils.data import DataLoader
from subword_nmt.apply_bpe import BPE
import codecs
from tqdm import tqdm
@@ -44,51 +44,51 @@ def load_vocab(vocab_file):
return vocab
def protein2emb_encoder(x, words2idx_p):
max_p = 152
# t1 = pbpe.process_line(x).split() # split
t1 = x.split(',')
try:
i1 = np.asarray([words2idx_p[i] for i in t1]) # index
except:
i1 = np.array([0])
# print(x)
l = len(i1)
if l < max_p:
i = np.pad(i1, (0, max_p - l), 'constant', constant_values=0)
input_mask = ([1] * l) + ([0] * (max_p - l))
else:
i = i1[:max_p]
input_mask = [1] * max_p
return i, np.asarray(input_mask)
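# A minimal check of the pad/truncate contract above (toy vocab, purely illustrative):
#   i, mask = protein2emb_encoder('A,B,A', {'A': 1, 'B': 2})
#   assert i.shape == (152,) and mask.sum() == 3
# Caveat: the bare `except:` collapses the whole sequence to [0] if any token is missing from words2idx_p.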
def drug2emb_encoder(x, dbpe, words2idx_d):
max_d = 50
# max_d = 100
t1 = dbpe.process_line(x)
t1 = t1.split() # split
try:
i1 = np.asarray([words2idx_d[i] for i in t1]) # index
except:
i1 = np.array([0])
# print(x)
l = len(i1)
print(i1)
if l < max_d:
i = np.pad(i1, (0, max_d - l), 'constant', constant_values=0)
input_mask = ([1] * l) + ([0] * (max_d - l))
else:
i = i1[:max_d]
input_mask = [1] * max_d
return i, np.asarray(input_mask)
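# Note: `print(i1)` above looks like leftover debugging, and the bare `except:` zeroes the
# entire drug sequence when a single BPE token is unknown. A narrower per-token fallback
# (a sketch, not the committed code) would be:
#   i1 = np.asarray([words2idx_d.get(tok, 0) for tok in t1])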
# def protein2emb_encoder(x, words2idx_p):
# max_p = 152
# # t1 = pbpe.process_line(x).split() # split
# t1 = x.split(',')
# try:
# i1 = np.asarray([words2idx_p[i] for i in t1]) # index
# except:
# i1 = np.array([0])
# # print(x)
#
# l = len(i1)
#
# if l < max_p:
# i = np.pad(i1, (0, max_p - l), 'constant', constant_values=0)
# input_mask = ([1] * l) + ([0] * (max_p - l))
# else:
# i = i1[:max_p]
# input_mask = [1] * max_p
#
# return i, np.asarray(input_mask)
# def drug2emb_encoder(x, dbpe, words2idx_d):
# max_d = 50
# # max_d = 100
# t1 = dbpe.process_line(x)
# t1 = t1.split() # split
# try:
# i1 = np.asarray([words2idx_d[i] for i in t1]) # index
# except:
# i1 = np.array([0])
# # print(x)
#
# l = len(i1)
# print(i1)
#
# if l < max_d:
# i = np.pad(i1, (0, max_d - l), 'constant', constant_values=0)
# input_mask = ([1] * l) + ([0] * (max_d - l))
#
# else:
# i = i1[:max_d]
# input_mask = [1] * max_d
#
# return i, np.asarray(input_mask)
def seq2emb_encoder(input_seq, max_len, vocab):
@@ -300,7 +300,6 @@ class Data_Gen(data.Dataset):
def __init__(self, train_file):
'Initialization'
# load data
with open(train_file, 'r') as f:
self.seq = f.readlines()
# with open(mask_file, 'r') as f:
@@ -327,7 +326,7 @@ def get_task(task_name):
"vocab_pair": './config/drug_codes_chembl.txt',
"begin_id": '[CLS]',
"separate_id": "[SEP]",
"max_len": 256
"max_len": 512
}
if task_name.lower() == 'train':
@@ -345,30 +344,30 @@ def get_task(task_name):
}
return df_test, tokenizer_config
elif task_name.lower() == 'train_z_1':
df = {"sps": './data/train_sps',
"smile": './data/train_smile',
"affinity": './data/train_z_1_ic50',
}
return df, tokenizer_config
elif task_name.lower() == 'train_z_10':
df = {"sps": './data/train_sps',
"smile": './data/train_smile',
"affinity": './data/train_z_10_ic50',
}
return df, tokenizer_config
elif task_name.lower() == 'train_z_100':
df = {"sps": './data/train_sps',
"smile": './data/train_smile',
"affinity": './data/train_z_100_ic50',
}
return df, tokenizer_config
#
# elif task_name.lower() == 'train_z_1':
# df = {"sps": './data/train_sps',
# "smile": './data/train_smile',
# "affinity": './data/train_z_1_ic50',
# }
#
# return df, tokenizer_config
#
# elif task_name.lower() == 'train_z_10':
# df = {"sps": './data/train_sps',
# "smile": './data/train_smile',
# "affinity": './data/train_z_10_ic50',
# }
#
# return df, tokenizer_config
#
# elif task_name.lower() == 'train_z_100':
# df = {"sps": './data/train_sps',
# "smile": './data/train_smile',
# "affinity": './data/train_z_100_ic50',
# }
#
# return df, tokenizer_config
elif task_name.lower() in ['train_mol']:
df_train = "data/tokenize_data/train.tokenize"
@@ -378,23 +377,19 @@ def get_task(task_name):
"vocab_pair_p": './config/protein_codes_uniprot.txt',
"begin_id": '[CLS]',
"separate_id": "[SEP]",
"max_len": 595
"max_len": 512
}
return df_train, tokenizer_config
elif task_name.lower() == 'test_mol':
df_test = {"sps": './data/test/test_sps',
'seq': './data/test/test_protein_seq',
"smile": './data/test/test_smile',
"affinity": './data/test/test_ic50',
}
df_test = "data/tokenize_data/test.tokenize"
tokenizer_config = {"vocab_file": './config/vocab_mol.txt',
"vocab_pair": './config/drug_codes_chembl.txt',
"vocab_pair_p": './config/protein_codes_uniprot.txt',
"begin_id": '[CLS]',
"separate_id": "[SEP]",
"max_len": 595
"max_len": 512
}
return df_test, tokenizer_config
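# A minimal usage sketch (task names and the updated max_len as defined above):
#   df, cfg = get_task('train_mol')
#   assert cfg["max_len"] == 512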
@@ -408,7 +403,7 @@ def get_task(task_name):
"vocab_pair_p": './config/protein_codes_uniprot.txt',
"begin_id": '[CLS]',
"separate_id": "[SEP]",
"max_len": 595
"max_len": 512
}
return df_train, df_train_mask, tokenizer_config
@@ -439,11 +434,15 @@ class Tokenizer(object):
self.vocab = load_vocab(tokenizer_config["vocab_file"])
def seq2emb_encoder_simple(self, input_seq, vocab):
all_ids = []
for i in input_seq:
try:
ids = np.asarray([vocab[i] for i in input_seq])
id = vocab[i]
all_ids.append(id)
except:
ids = np.array([0])
id = vocab["[UNK]"]
all_ids.append(id)
ids = np.asarray(all_ids)
return ids
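# The rewrite falls back to "[UNK]" per token instead of collapsing the whole sequence
# to [0]; an equivalent one-liner (assuming vocab is a dict containing "[UNK]"):
#   ids = np.asarray([vocab.get(tok, vocab["[UNK]"]) for tok in input_seq])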
def convert_token_to_ids(self, seq):
@@ -473,7 +472,7 @@ class Tokenizer(object):
input = pad_sequence(all_seq_ids, batch_first=True)
# input_ori = pad_sequence(all_seq_ori, batch_first=True)
input_mask = input != 0
input_mask = (input != 0).long()
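# casting the boolean mask to int64 here keeps it usable by downstream code that
# multiplies or embeds the mask rather than indexing with it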
# input_mask = pad_sequence(all_mask)
# return torch.from_numpy(input).long(), torch.from_numpy(input_mask).long(), torch.from_numpy(token_type_ids).long()
# return input, input_mask, input_ori
@@ -494,14 +493,34 @@ if __name__ == "__main__":
# vocab = load_vocab(vocab_file)
# test train
task = 'train_mol'
data_file, tokenizer_config = get_task(task)
params = {'batch_size': 1,
task = 'pre-train'
data_file, data_mask, tokenizer_config = get_task(task)
dataset = Data_Provide(data_file, data_mask)
tokenizer = Tokenizer(tokenizer_config)
data_loder_para = {'batch_size': 2,
'shuffle': False,
'num_workers': 0
'num_workers': 0,
}
trainset = Data_Encoder_mol(data_file, tokenizer_config)
training_generator = data.DataLoader(trainset, **params)
for i, (len_d, len_p) in tqdm(enumerate(training_generator)):
d = len_d.numpy()[0]
p = len_p.numpy()[0]
data_generator = DataLoader(dataset, **data_loder_para)
all_len = []
m = 0
for i, (seq, seq_mask, affinity) in enumerate(tqdm(data_generator)):
input_random_mask, attention_mask = tokenizer.convert_token_to_ids(seq_mask)
label, _ = tokenizer.convert_token_to_ids(seq)
posi = torch.where(input_random_mask == 1)
target = label[posi]
a = input_random_mask == 4
if torch.sum(a) > 2:
print(torch.sum(a))
# a = seq[0].split()
# b = seq_mask[0].split()
# all_len.append(len(a))
# if len(a) > 512:
# m += 1
# if len(a) != len(b):
# print(seq)
# print(i)
# all_len = np.array(all_len)
# print(np.max(all_len))
# print(np.mean(all_len))
# print(m)
No preview for this file type
@@ -34,6 +34,6 @@ if __name__ == '__main__':
# pred_dir = f.readline()
# pred_dir = pred_dir.split()[5].split('/')[-1]
# pred_result = './predict/{}/test.txt'.format(pred_dir)
pred_result = './predict/train_mol_lr-1e-5-batch-32-e-30-layer3-0609-e-26/test_mol.txt'
pred_result = './predict/add_pretrain_1019-s-98844/test_mol.txt'
test_label_path = './data/test/test_ic50'
eval(pred_result, test_label_path)
@@ -1828,11 +1828,14 @@ class Multilayer_perceptron(nn.Module):
def __init__(self, config):
super(Multilayer_perceptron, self).__init__()
self.layer_1 = nn.Linear(config.hidden_size, 1)
# self.drop_out = nn.Dropout()
# self.layer_2 = nn.Linear(512, 256)
# self.layer_3 = nn.Linear(256, 1)
# self.drop_out = nn.Dropout(0.5)
def forward(self, bert_output):
# x = self.drop_out(bert_output)
x1 = self.layer_1(bert_output)
# x2 = self.drop_out(x1)
# x1 = F.relu(x1, inplace=True)
# x1 = self.drop_out(x1)
# x2 = self.layer_2(x1)
@@ -1840,6 +1843,7 @@ class Multilayer_perceptron(nn.Module):
# x2 = self.drop_out(x2)
# x2 = self.layer_3(x1)
# return x2
return x1
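# A minimal shape check (SimpleNamespace stands in for the real config; illustrative only):
#   from types import SimpleNamespace
#   mlp = Multilayer_perceptron(SimpleNamespace(hidden_size=768))
#   mlp(torch.randn(2, 768)).shape  # -> torch.Size([2, 1])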
......
@@ -12,14 +12,16 @@ torch.set_default_tensor_type(torch.DoubleTensor)
def train(args, model, dataset, tokenizer):
def train(args, model, dataset, tokenizer, pre_train=False):
data_loder_para = {'batch_size': args.batch_size,
'shuffle': True,
'num_workers': args.workers,
}
data_generator = DataLoader(dataset, **data_loder_para)
data_generator = DataLoader(dataset, **data_loder_para)
if pre_train == True:
model.load_state_dict(torch.load(args.init), strict=True)
model.train()
opt = torch.optim.Adam(model.parameters(), lr=args.lr)
loss_fct = torch.nn.MSELoss()
@@ -42,12 +44,8 @@ def train(args, model, dataset, tokenizer):
# use cuda
# input model
input_ids, attention_mask = tokenizer.convert_token_to_ids(input)
# pred_affinity = model(input_ids=input_ids.cuda(), token_type_ids=token_type_ids.cuda(), attention_mask=input_mask.cuda())
pred_affinity = model(input_ids=input_ids.cuda(), attention_mask=attention_mask.cuda())
loss = loss_fct(pred_affinity, affinity.cuda().unsqueeze(-1))
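# unsqueeze(-1) aligns affinity from shape (B,) to (B, 1) to match pred_affinity;
# without it nn.MSELoss would broadcast (B,) against (B, 1) into a (B, B) comparison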
# else:
# pred_affinity = model(input_ids=input, token_type_ids=token_type_ids, attention_mask=input_mask)
# loss = loss_fct(pred_affinity, affinity.unsqueeze(-1))
step += 1
writer.add_scalar('loss', loss, global_step=step)
# Update gradient
@@ -117,7 +115,8 @@ def main(args):
print('task name : {}'.format(args.task))
if args.task in ['train', 'train_z_1', 'train_z_10', 'train_z_100', 'train_mol']:
train(args, model, dataset, tokenizer)
train(args, model, dataset, tokenizer, pre_train=args.pre_train)
# train(args, model, dataset, tokenizer)
elif args.task in ['test', 'test_mol']:
test(args, model, dataset, tokenizer)
@@ -148,25 +147,28 @@ if __name__ == '__main__':
parser.add_argument('--init', default='model', type=str, help='init checkpoint')
parser.add_argument('--output', default='predict', type=str, help='result save path')
# parser.add_argument('--shuffle', default=True, type=str, help='shuffle data')
parser.add_argument('--do_eval', default=False, type=bool, help='do eval')
# parser.add_argument('--do_eval', default=False, type=bool, help='do eval')
parser.add_argument('--pre_train', default=False, type=bool, help='use pre-train')
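# note: argparse's type=bool returns True for any non-empty string (even "False");
# action='store_true' is the usual fix if this flag is ever passed on the command line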
args = parser.parse_args()
# local test
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
args.task = 'train_mol'
# args.savedir = 'local_test_train'
args.savedir = 'train'
args.epochs = 10
args.savedir = 'without-pre-train-layer-6-1021'
# # args.savedir = 'train'
args.epochs = 30
args.lr = 1e-5
args.config = './config/config_layer_6_mol.json'
args.pre_train = False
# args.init = './model/mask-LM-lr-1e-4-1019/epoch-17-step-593064-loss-0.1007341668009758.pth'
# args.task = 'test'
# args.init = './model/lr-1e-5-batch-32-e-10-layer6-0428/epoch-8-step-74133-loss-0.6730387237803921.pth'
# args.output = './predict/test'
# args.config = './config/config_layer_6.json'
# args.task = 'test_mol'
# args.init = './model/add_pretrain_1019/epoch-2-step-98844-loss-0.5473515232986019.pth'
# args.output = './predict/add_pretrain_1019-s-98844'
# args.config = './config/config_layer_6_mol.json'
main(args)
@@ -7,7 +7,7 @@ from modeling_bert import BertAffinityModel, BertAffinityModel_MaskLM
from torch.utils.tensorboard import SummaryWriter
import os
from tqdm import tqdm
# torch.set_default_tensor_type(torch.DoubleTensor)
torch.set_default_tensor_type(torch.DoubleTensor)
@@ -42,17 +42,13 @@ def train(args, model, dataset, tokenizer):
for i, (seq, seq_mask, affinity) in enumerate(data_generator):
input_random_mask, attention_mask = tokenizer.convert_token_to_ids(seq_mask)
label, _ = tokenizer.convert_token_to_ids(seq)
# pred_affinity = model(input_ids=input.cuda(), token_type_ids=token_type_ids.cuda(), attention_mask=input_mask.cuda())
# assert input_random_mask.size() == label.size(), "{}".format(seq_mask)
logits = model(input_ids=input_random_mask.cuda(), attention_mask=attention_mask.cuda())
# loss = 0
posi = torch.where(input_random_mask == 1)
pred_logits = logits[posi]
target = label[posi]
loss = loss_fct(pred_logits, target.cuda())
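# this appears to assume token id 1 marks [MASK] positions in the randomly masked input,
# so the LM loss is computed only at masked positions (a reading of the code, not documented)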
# else:
# pred_affinity = model(input_ids=input, token_type_ids=token_type_ids, attention_mask=input_mask)
# loss = loss_fct(pred_affinity, affinity.unsqueeze(-1))
step += 1
writer.add_scalar('loss', loss, global_step=step)
# Update gradient
@@ -60,9 +56,7 @@ def train(args, model, dataset, tokenizer):
loss.backward()
opt.step()
# if (i % 100 == 0):
print('Training at Epoch ' + str(epoch + 1) + ' step ' + str(step) + ' with loss ' + str(
loss.cpu().detach().numpy()))
print('Training at Epoch ' + str(epoch + 1) + ' step ' + str(step) + ' with loss ' + str(loss.cpu().detach().numpy()))
# save
if epoch >= 1 and step % save_step == 0:
save_path = './model/' + args.savedir + '/'
@@ -137,7 +131,7 @@ if __name__ == '__main__':
parser = ArgumentParser(description='BertAffinity')
parser.add_argument('-b', '--batch-size', default=8, type=int,
metavar='N',
help='mini-batch size (default: 16), this is the total '
help='mini-batch size (default: 8), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
@@ -160,9 +154,9 @@ if __name__ == '__main__':
args = parser.parse_args()
# local test
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
args.task = 'pre-train'
args.savedir = 'mask-LM-lr-1e-4-1019'
args.savedir = 'mask-LM-layer-6-dobule-1020'
# args.savedir = 'train'
args.epochs = 30
args.lr = 1e-4
......
from PIL import Image
import os
from dataset import Data_Encoder, get_task, Data_Encoder_mol, Data_Encoder_LM, Tokenizer, Data_Provide
img_1_path = "E:\FIVN论文\照片\朱学凯.jpeg"
filename = "E:\FIVN论文\照片\朱学凯_gray.jpeg"
tokenizer_config = {"vocab_file": './config/vocab_mol.txt',
"vocab_pair": './config/drug_codes_chembl.txt',
"vocab_pair_p": './config/protein_codes_uniprot.txt',
"begin_id": '[CLS]',
"separate_id": "[SEP]",
"max_len": 595
}
tokenizer = Tokenizer(tokenizer_config)
a = '[CLS] [MASK] (=CC =C 1)C [MASK] )N2 C(=C )C(= C(C2=O) C#N )C(C)C [SEP] MMK RR WS NN GG FAAL KML EE SSSE VTSS SN GLVL SSD [MASK] MS PS SLDS PV YGD QE MWL CN DS ASY NNS [MASK] HS VI TSL QG CTS SLP AQ TT II [MASK] ALP NSNN [MASK] NN QN QN YQ NG [MASK] MN T NLS VN TNN SV GGGG [MASK] VPG MT [MASK] NGL GGGG [MASK] QVN NHN HS HN HL HH NSNS NHS NSSS HH TNG HMG IGG GGG GLS VN ING PNI VS [MASK] QL NSL QAS QNG QVI HAN IG IHS IIS NGL N HHHH HH MNN [MASK] [MASK] HH TP R SES [MASK] SI SSG [MASK] SP SSSL NG FS TSD AS DVKK IKKG [MASK] RLQE EL CL VCG DR [MASK] YH [MASK] AL TC EG CKG FF RRS VT KN AV YC CK FG H ACE MD MY MRR KC QE CRL [MASK] CL [MASK] MR PE CVV [MASK] NQ CAM K RRE KK [MASK] KEKD KI QTS [MASK] AT EI KKE ILDL MT CE PPS HP TCP LLPE [MASK] AK CQ AR NIP PLS YN QLAV IY KL IW YQ DG YE QPS EEDL KR IM SSP DE NES QHD AS FRH ITE IT IL TV QL IVE FAK GLP AF TK IP QE DQ IT LLK AC [MASK] VM ML RM AR RYD [MASK] DS IF FAN NRS [MASK] DS YKM AG MAD NIE DLL HF CRQ [MASK] MK VD [MASK] YALL TAI VI FSD RP GLEE AEL VEAI QS [MASK] IDTL RIY IL NRH CG DP MSL VF FAK LLS IL TEL RTLG NQN AE M CF SLKL KN [MASK] [MASK] FLEE [MASK] VH AI PPS VQ SH IQ AT QAE KAA QEAQ ATTS AIS AAAT SSSS IN TS [MASK] SSSS SLS PS AAS TP NGG [MASK] DY VG TD MS MSL VQ SD [MASK] [SEP]', '[CLS] CC(=N)N 1 C(=N)C =CC2 =C1 C=C(C [MASK] 2)C3 =CC=C(C =C 3)C4 [MASK] )N [MASK] =CC =CC=C 5 [SEP] MS DV AI VKEG WL HK RG [MASK] IK TW RP RY [MASK] KN [MASK] [MASK] YKE RP QD VD [MASK] APL NN FS VAQ CQL MK TE RP RP NTF IIR CL QW TT VIER TF HVE [MASK] REE WTT [MASK] QTV ADGL KK QEE [MASK] MD FR SGS PS DN SG AEE ME VSL AK PKH [MASK] MNE FE [MASK] KLLG [MASK] TFG KVIL VKE KATG RY [MASK] KIL KKE VI VAK DE VAH TLTE NR [MASK] QNS RHP FL TAL KYS FQT HD RL [MASK] ME YAN GGEL FF [MASK] [MASK] RVF [MASK] [MASK] AR FYG AE [MASK] ALD YL [MASK] KNVV YR DL KLE NL ML DK DG HIK [MASK] DF GL C KEG IKDG AT MKT FCG TPE YL [MASK] VLED ND YG RAV DW W GLG VV MYE MM [MASK] RLP FY NQD HE KL [MASK] IL MEE IR FP RTLG PE AKS LLSG LLKK DP KQRL GGG SED AKEI MQH RF FAG IV WQH VYE KKLS PP FKP QVT SE TD TR YFD [MASK] FT AQ MIT IT PP DQ DDS ME CVD SE RR PH FP QFS YS ASG TA [SEP]', '[CLS] CCCN(CC C1 (CC(C 1)NC(=O) [MASK] =CC3 =CC =CC=C 3C=C 2) O)C 4CC 4C5 =CC=C(C =C 5)C l [SEP] M [MASK] QLSS HL [MASK] TCG AE NS TGAS QAR PH AY YAL SY CAL ILAI VFG NGL VC MAVL KE RAL QTT TN YL VV [MASK] ADLL VATL [MASK] PW VV YLE VTGG VW NFS RI C CD VF VTL D VM [MASK] [MASK] [MASK] NL C AIS ID RY [MASK] VM PVH YQ HG TG QSS CRR VAL MIT AV WVL AF AVS CP LLFG FN TTG [MASK] T [MASK] IS NPD FVI [MASK] VV SF [MASK] PFG VTVL [MASK] AR IY VVL KQ RR [MASK] IL TR QNS QC NS VRPG FP QQ TLS PDP AHL ELKR YYS IC QD TAL GGPG FQE [MASK] ELKR EE KT RN SLSP [MASK] AP KL SLE VR KLS NG RLST SL KLG PL QP [MASK] [MASK] REKK AT QMV AI VLG AF IVC WL PF FL TH VL NT HC QT CH VS PEL YS AT T WLG Y VNS AL [MASK] VI YTT FN IE FRK AFL KILS C [SEP]', '[CLS] C1CC1 C(=O)N C2=C 3C =C( [MASK] C=N 2)C [MASK] N =CC=C 4 [SEP] MSG RP RTT SF AES CKP VQQ PS AFGS MK [MASK] KDG SK VT TVV ATPG QG PD RP [MASK] VS [MASK] TK VIG NGS [MASK] VV YQ AKL CD SGEL VAI KKVL QD KR [MASK] REL [MASK] MR KL DH CN IV RL RY FF YSS GE KK [MASK] [MASK] NLVL DY VPE TVY RVAR [MASK] RAK [MASK] PVI Y VKL Y MY QL FR SLAY IHS FG [MASK] HRD IK PQN LLLD [MASK] TAVL KL CD FGS AKQL VRGE PN VS YI CS RY YR [MASK] IFG AT DY TSS ID [MASK] AG CVL AE LLLG QP IF PGD [MASK] QLVE IIK VLG TP TRE QIRE MNP NY [MASK] FKF PQ IK [MASK] PW TK VF RP RTP PE AIAL CS RLLE YTP TARL TPLE AC AH SF FD ELRD PN VKL PNG RD TP AL FN FTT [MASK] SS NP PLAT IL IP PH AR IQ AAAS TP TN [MASK] [MASK] AN TGD RG QT NN AAS ASAS NS T [SEP]', '[CLS] C1C (CN C1=O)C(=O)N C2=N N(C (=C2 [MASK] =CC(= CC=C 3) OCC [MASK] 4=CC =C(C=C [MASK] l [SEP] MD SS TW SP [MASK] AVTR PVE TH ELIR N AAD IS II VI YF VV VM AVGL WAM FS TN RG TVGG FFL AGRS MVW WP IG [MASK] FAS NIG [MASK] HF VGL AGTG AASG IAI GG FE WN ALVL VV VLG WL FVP IY IK AGVV TM [MASK] YL RKR [MASK] [MASK] IQ [MASK] YLS LLS LLL YIF TK IS AD IF SGAI FI NL ALGL NL YL AI FLLL AI TAL YTI TG GLAA VI YTD TL QT VI [MASK] VG SLIL [MASK] FAF [MASK] VGG YD AF ME KY MK AI PTI VSDG NTT [MASK] KC YTP [MASK] SF HIF RD PLTG DL PW PG FIFG MS ILTL W YW CTD QVI VQ R CLS AK [MASK] HVK [MASK] CIL CG YL [MASK] MP MFI MVM PG MIS RIL YTE KI AC VV PSE CE KY CG TK VG CTN IAY PTL VVEL MP NGL RGL ML SV ML ASL MS SLTS [MASK] NS [MASK] FT [MASK] IY AKVR KR AS EK EL [MASK] AGRL FIL [MASK] IS IAW VP I VQS AQSG QL FD YI [MASK] [MASK] [MASK] PP IAA VF LLAI FW KR VNE PGAF WGL ILG LLIG [MASK] RM IT EF AYG TGS CME PS NC PT II CG VH YL YF AIIL FAI SF IT IVV ISLL TKP IP DV HL YRL CW SL RNS [MASK] RI DLD [MASK] NI QEG PKE TIE IE TQ VPE KKKG IF RR AY DL FC GL EQ HG AP KMT EEEE KAM KMK [MASK] TSE KPL [MASK] VL NV NG IIL VT VAV FC HAY F [MASK] [SEP]', '[CLS] CCN(CC) CC1=CC =C(C=C 1)C [MASK] 2CC3 [MASK] C(C =C3 C2=O) OC) OC [SEP] MR PP [MASK] PL HTP SLAS PLL F [MASK] LL GGG AR AEG RE DP QLL VR VRGG QLRG IRL [MASK] GG PVS AFLG [MASK] FAE PP VGS RR FMP PE PKR [MASK] [MASK] ILD ATTF QN [MASK] YQY VD TL YPG FEG TE MW NP NR EL [MASK] CL YL NVW TP YP RP [MASK] TP VL IW IY GGG [MASK] SGAS SLD [MASK] DG RFL AQ VEG TVL VS [MASK] YR VG TFG FLAL [MASK] RE APG NVG LLDQ RLAL QW VQE NI AA FGG DP MS VTL [MASK] AG [MASK] VG [MASK] ILSL [MASK] RSL FHR AVL QSG TP NG PW ATVS AGE ARRR ATLL ARL VG [MASK] GG AGG ND TEL IS [MASK] RTR PAQ DLVD HE WH VLP QES IF RFS FVP VV DGD FLSD TP DAL IN TG DF QDL QVL VGVV KDE GS YFL VYG VPG FSKD NE SLIS RAQ [MASK] VRIG VP QAS DL AAE AVVL HY TD WL HPE DP AHL RD AMS AV VGD HN [MASK] CPV AQL AG RLAA QG AR VY AY IFE HR ASTL TW PL WMG VP HG YE [MASK] FIF GLPL DP SL NY TVEE RIF AQRL MQY WTN FAR TG DP ND PR DS KS [MASK] WPP YTT AAQQ Y VSL NL KPL EV RR GL RAQ TC AF WNR FL PK LLS ATD TLDE AERQ W KAE FHR WSS YM VH WKN QFD HYS KQE RC SD L [SEP]', '[CLS] C1 C(= CC2 =CC=CC [MASK] 2)C(=O) [MASK] CC3 =CC=CC =N 3)C S1 [SEP] MLS NS QG QS [MASK] VP FP APAP PP QPP TP AL [MASK] PP [MASK] PPPPPP QQ FP [MASK] H VKS [MASK] QIKK NAI ID DY KVT SQ VL GLG [MASK] KVL QIF NKR TQE KF [MASK] KML QD CPK ARRE VEL H WR AS QCP HI VR IVD VYE NL YAG RK CLL IV ME [MASK] GGEL FS R IQD RG DQ AF TE RE AS EI MKS IGE AI QYL HS IN IAH RD VKPE NLL YTS [MASK] PN AIL KLTD FG F AKE [MASK] HN SLTT P CY TP YY VAPE VLG PE KYD KS CD [MASK] SLG VI [MASK] LL [MASK] YPP FYS [MASK] GL AIS PG MKT RIR MG QYE FP [MASK] WSE VSEE VK ML IR [MASK] KTE PT QR [MASK] IT EF MN HP W IM QS TK VP QTPL HTS [MASK] KED KE RW ED VKEE MTS ALAT MR VD YE QIK IKK IED AS NP LLL KRR [MASK] ALE AAAL AH [SEP]', '[CLS] CC1=C2 C(=N C(=N C2=N C=C 1CC 3=C(C =CC(= C3 [MASK] CCCCC(=O)O) OC)N [MASK] [SEP] MVR PL NC IV AVS QN MG IG KNG DLP [MASK] LL RNE FKY FQR MTT TSS VEG KQ NL VI MG RKT WFS IPE KN [MASK] KD RIN IVL [MASK] ELKE PP QG AH FLAK SLDD AL KLIE QP ELAS KVD MVW VV GGSS VY QE [MASK] NQ PG HL RL FVT [MASK] MQE FE SD TF FPE IDLE KY KLL PE YPG VLSE IQEE KGIK YK FE [MASK] KK D [SEP]'
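# `a` is a tuple of '[CLS] <SMILES tokens> [SEP] <masked protein tokens> [SEP]' strings,
# so convert_token_to_ids below treats it as a batch of sequences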
def RGBtoGray(path, save):
# files = os.listdir(path)
# for file in files:
# imgpath = path + '/' + file
# #print(imgpath)
#
img = Image.open(path).convert('L')
# resize converts the image to whatever pixel dimensions you need
# img = im.resize((512, 512))
# dirpath = filename
# file_name, file_extend = os.path.splitext(f)
# dst = os.path.join(os.path.abspath(dirpath), file_name + '.jpg')
img.save(save)
if __name__ == "__main__":
RGBtoGray(img_1_path, filename)
ids, mask = tokenizer.convert_token_to_ids(a)
label = ids[ids == 1]
print([len(i.split()) for i in a])
\ No newline at end of file